//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>
#include <string.h>

namespace __scudo {

#if SANITIZER_CAN_USE_ALLOCATOR64
const uptr AllocatorSpace = ~0ULL;
const uptr AllocatorSize = 0x40000000000ULL;
typedef DefaultSizeClassMap SizeClassMap;
struct AP {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP> PrimaryAllocator;
#else
// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
// security improvements brought to the 64-bit one. This makes the 32-bit
// version of Scudo slightly less toughened.
static const uptr RegionSizeLog = 20;
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif // SANITIZER_WORDSIZE
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
    RegionSizeLog, ByteMap> PrimaryAllocator;
#endif // SANITIZER_CAN_USE_ALLOCATOR64

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
    ScudoBackendAllocator;

static ScudoBackendAllocator &getBackendAllocator();

static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static uptr Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

SANITIZER_WEAK_ATTRIBUTE u32 computeHardwareCRC32(u32 Crc, uptr Data);

INLINE u32 computeCRC32(u32 Crc, uptr Data, u8 HashType) {
  // If SSE4.2 is defined here, it was enabled everywhere, as opposed to only
  // for scudo_crc32.cpp. This means that other SSE instructions were likely
  // emitted at other places, and as a result there is no reason not to use
  // the hardware version of the CRC32.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  return computeHardwareCRC32(Crc, Data);
#else
  if (computeHardwareCRC32 && HashType == CRC32Hardware)
    return computeHardwareCRC32(Crc, Data);
  else
    return computeSoftwareCRC32(Crc, Data);
#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered with
  // in the meantime. To prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    uptr Size = getBackendAllocator().GetActuallyAllocatedSize(
        getAllocBeg(Header));
    if (Size == 0)
      return 0;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the Chunk pointer and its ChunkHeader.
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u8 HashType = atomic_load_relaxed(&HashAlgorithm);
    u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HashType);
    for (uptr i = 0; i < ARRAY_SIZE(HeaderHolder); i++)
      Crc = computeCRC32(Crc, HeaderHolder[i], HashType);
    return static_cast<u16>(Crc);
  }

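  // In effect, the checksum above amounts to the following (illustrative
  // pseudo-code of the loop just before, not a separate code path):
  //   Crc = CRC32(Cookie, this);            // bound to the chunk address
  //   for (W : header words, Checksum = 0)  // bound to the header contents
  //     Crc = CRC32(Crc, W);
  //   Checksum = static_cast<u16>(Crc);
  // Forging a valid header therefore requires knowing the per-process Cookie
  // in addition to the chunk address.
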
  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // terminate the process in the event of an invalid chunk.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  void eraseHeader() {
    PackedHeader NullPackedHeader = 0;
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader,
                                                 &OldPackedHeader,
                                                 NewPackedHeader,
                                                 memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

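// An illustrative view of the chunk layout (the Offset is 0 for chunks that
// needed no extra alignment padding):
//
//   AllocBeg                       getScudoChunk(UserBeg)          UserBeg
//   |<-- Offset << MinAlignmentLog -->|<------- header ------->|user data...
//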
ScudoChunk *getScudoChunk(uptr UserBeg) {
  return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
}

static bool ScudoInitIsRunning = false;

static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t PThreadKey;

static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;

static void teardownThread(void *p) {
  uptr v = reinterpret_cast<uptr>(p);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
    pthread_setspecific(PThreadKey, reinterpret_cast<void *>(v + 1));
    return;
  }
  drainQuarantine();
  getBackendAllocator().DestroyCache(&Cache);
  ThreadTornDown = true;
}

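// Illustration of the teardown dance above: TSD destructors run for up to
// PTHREAD_DESTRUCTOR_ITERATIONS rounds as long as keys keep getting re-set.
// Assuming the glibc value of 4 and the initial key value of 1 set by
// initThread, teardownThread re-arms PThreadKey on the first three rounds and
// only drains the quarantine on the fourth and last one, after the other
// destructors have had their chance to call free().
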
static void initInternal() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if SSE4.2 is supported; if so, opt for the hardware CRC32 version.
  if (testCPUFeature(CRC32CPUFeature)) {
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
  }

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initAllocator(Options);

  MaybeStartBackgroudThread();

  ScudoInitIsRunning = false;
}

static void initGlobal() {
  pthread_key_create(&PThreadKey, teardownThread);
  initInternal();
}

static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
  pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
  getBackendAllocator().InitCache(&Cache);
  ThreadInited = true;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    Chunk->eraseHeader();
    void *Ptr = Chunk->getAllocBeg(&Header);
    getBackendAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // TODO(kostyak): figure out the best way to protect the batches.
    return getBackendAllocator().Allocate(Cache_, Size, MinAlignment);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
static thread_local ScudoQuarantineCache ThreadQuarantineCache;

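// A rough sketch of the delayed-free flow implied by the types above: a
// deallocated chunk is Put() in the per-thread ScudoQuarantineCache; when that
// cache exceeds its size limit, its batches are merged into the global
// quarantine; and only once the global quarantine overflows are the oldest
// chunks handed to QuarantineCallback::Recycle for actual deallocation.
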
void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they
  // can be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  ScudoQuarantineCache FallbackQuarantineCache;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit ScudoAllocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
        SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
        MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
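    // For illustration only (the actual constants depend on the target): with
    // SizeClassMap::kMaxSize == 1 << 17, MinAlignment == 16 and a 16-byte
    // aligned header, MaxPrimaryAlignment is 1 << 16 and MaxOffset is
    // (65536 - 16) >> 4 == 4095, which the Offset bit-field must represent.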
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    Cookie = Prng.getNext();
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't one.
  bool isValidPointer(const void *UserPtr) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!UserPtr)
      return false;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(UserBeg, MinAlignment))
      return false;
    return getScudoChunk(UserBeg)->isValid();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
      dieWithMessage("ERROR: alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Size == 0)
      Size = 1;

    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    if (Alignment > MinAlignment)
      NeededSize += Alignment;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();

    // Primary backed and Secondary backed allocations have a different
    // treatment. We deal with alignment requirements of Primary serviced
    // allocations here, but the Secondary will take care of its own alignment
    // needs, which means we also have to work around some limitations of the
    // combined allocator to accommodate the situation.
    bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);

    void *Ptr;
    uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize, AllocationAlignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      AllocationAlignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDieOnOOM();

    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    // If the allocation was serviced by the secondary, the returned pointer
    // accounts for ChunkHeaderSize to pass the alignment check of the combined
    // allocator. Adjust it here.
    if (!FromPrimary) {
      AllocBeg -= AlignedChunkHeaderSize;
      if (Alignment > MinAlignment)
        NeededSize -= Alignment;
    }

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && FromPrimary)
      memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));

    uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
    if (!IsAligned(UserBeg, Alignment))
      UserBeg = RoundUpTo(UserBeg, Alignment);
    CHECK_LE(UserBeg + Size, AllocBeg + NeededSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
    uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
    Header.Offset = Offset >> MinAlignmentLog;
    Header.AllocType = Type;
    if (FromPrimary) {
      Header.FromPrimary = FromPrimary;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and the
      // next page boundary.
      uptr PageSize = GetPageSizeCached();
      uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    Header.Salt = static_cast<u8>(Prng.getNext());
    getScudoChunk(UserBeg)->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(UserBeg);
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

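  // A worked example of the offset math in allocate() above, with made-up
  // values: assuming MinAlignment == 16, AlignedChunkHeaderSize == 16,
  // AllocBeg == 0x1000 and a requested Alignment of 64, UserBeg starts at
  // 0x1010 and is rounded up to 0x1040; Offset == 0x1040 - 0x10 - 0x1000 ==
  // 0x30, stored in the header as 0x30 >> 4 == 3.
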
  // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
  // we directly deallocate the chunk, otherwise the flow would lead to the
  // chunk being checksummed twice, once before Put and once in Recycle, with
  // no additional security value.
  void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                   uptr Size) {
    bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
    if (BypassQuarantine) {
      Chunk->eraseHeader();
      void *Ptr = Chunk->getAllocBeg(Header);
      if (LIKELY(!ThreadTornDown)) {
        getBackendAllocator().Deallocate(&Cache, Ptr);
      } else {
        SpinMutexLock Lock(&FallbackMutex);
        getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
      }
    } else {
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, Header);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, Size);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, Size);
      }
    }
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (OldHeader.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (OldHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         UserPtr);
        }
      }
    }
    uptr Size = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
        Chunk->getUsableSize(&OldHeader) - OldHeader.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       UserPtr);
      }
    }

    // If a small memory amount was allocated with a larger alignment, we want
    // to take that into account. Otherwise the Quarantine would be filled with
    // tiny chunks, taking a lot of VA memory. This is an approximation of the
    // usable size, which allows us to not call GetActuallyAllocatedSize.
    uptr LiableSize = Size + (OldHeader.Offset << MinAlignmentLog);
    quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     OldPtr);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize);
    }
    return NewPtr;
  }

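  // Illustration of the in-place path above, with made-up numbers: assuming
  // SizeClassMap::kMaxSize == 1 << 17, shrinking a chunk whose UsableSize is
  // 48 bytes is always done in place, while shrinking a 1 MB Secondary-backed
  // chunk to 16 bytes fails the (UsableSize - NewSize) < kMaxSize / 2 test
  // and allocates a fresh chunk instead.
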
  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!Ptr)
      return 0;
    uptr UserBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB) // Overflow check
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    return allocate(Total, MinAlignment, FromMalloc, true);
  }

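  // Example of the overflow check in calloc() above, on a 64-bit target:
  // calloc(1ULL << 33, 1ULL << 32) wraps Total around to 0, and the division
  // catches it since Total / Size == 0 != NMemB.
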
  void drainQuarantine() {
    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache));
  }

  uptr getStats(AllocatorStat StatType) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr stats[AllocatorStatCount];
    BackendAllocator.GetStats(stats);
    return stats[StatType];
  }
};

static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

void initAllocator(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void drainQuarantine() {
  Instance.drainQuarantine();
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // Size must be a multiple of the alignment. To avoid a division, we first
  // make sure that the alignment is a power of 2.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

} // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}
Kostya Serebryany712fc982016-06-07 01:20:26 +0000734}