//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>
#include <smmintrin.h>

#include <atomic>
#include <cstring>

namespace __scudo {

const uptr MinAlignmentLog = 4; // 16 bytes for x64
const uptr MaxAlignmentLog = 24;

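// Parameters of the 64-bit primary backend allocator (sanitizer_common's
// SizeClassAllocator64): a kSpaceBeg of ~0ULL requests a dynamically chosen
// base address, the region spans 1TB (0x10000000000), no per-chunk metadata
// is kept, and kRandomShuffleChunks randomizes the order in which chunks of a
// size class are handed out, making the heap layout harder to predict.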
struct AP {
  static const uptr kSpaceBeg = ~0ULL;
  static const uptr kSpaceSize = 0x10000000000ULL;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};

typedef SizeClassAllocator64<AP> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
  ScudoAllocator;

static ScudoAllocator &getAllocator();

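// Per-thread pseudo-random number generator (xorshift128+), used to generate
// the global Cookie at initialization and the Salt of each allocated chunk.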
static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static u64 Cookie;

enum ChunkState : u8 {
  ChunkAvailable  = 0,
  ChunkAllocated  = 1,
  ChunkQuarantine = 2
};

typedef unsigned __int128 PackedHeader;
typedef std::atomic<PackedHeader> AtomicPackedHeader;

// Our header requires 128 bits of storage on x64 (the only platform supported
// as of now), which fits nicely with the alignment requirements.
// Having the offset saves us from using functions such as GetBlockBegin, which
// is fairly costly. Our first implementation used the MetaData as well, which
// offers the advantage of being stored away from the chunk itself, but
// accessing it was costly as well.
// The header will be atomically loaded and stored using the 16-byte primitives
// offered by the platform (likely requires cmpxchg16b support).
struct UnpackedHeader {
  // 1st 8 bytes
  u16 Checksum      : 16;
  u64 RequestedSize : 40; // Needed for reallocation purposes.
  u8  State         : 2;  // available, allocated, or quarantined
  u8  AllocType     : 2;  // malloc, new, new[], or memalign
  u8  Unused_0_     : 4;
  // 2nd 8 bytes
  u64 Offset        : 20; // Offset from the beginning of the backend
                          // allocation to the beginning of the chunk itself,
                          // in multiples of MinAlignment. See comment about
                          // its maximum value and test in init().
  u64 Unused_1_     : 28;
  u16 Salt          : 16;
};
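// For reference: with MaxAlignment = 1 << 24 and ChunkHeaderSize = 16, the
// largest Offset ever stored is (MaxAlignment - ChunkHeaderSize) >>
// MinAlignmentLog = 2^20 - 1, which is exactly what the 20-bit field above
// can hold; init() verifies this at start-up.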

COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));

const uptr ChunkHeaderSize = sizeof(PackedHeader);

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered with.
  // To prevent this, we work with a local copy of the header.
  void *AllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // CRC32 checksum of the Chunk pointer and its ChunkHeader.
  // It currently uses the Intel Nehalem SSE4.2 crc32 64-bit instruction.
  u16 Checksum(UnpackedHeader *Header) const {
    u64 HeaderHolder[2];
    memcpy(HeaderHolder, Header, sizeof(HeaderHolder));
    u64 Crc = _mm_crc32_u64(Cookie, reinterpret_cast<uptr>(this));
    // This is somewhat of a shortcut. The checksum is stored in the 16 least
    // significant bits of the first 8 bytes of the header, hence zero-ing
    // those bits out. It would be more valid to zero the checksum field of the
    // UnpackedHeader, but would require holding an additional copy of it.
    Crc = _mm_crc32_u64(Crc, HeaderHolder[0] & 0xffffffffffff0000ULL);
    Crc = _mm_crc32_u64(Crc, HeaderHolder[1]);
    return static_cast<u16>(Crc);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if ((NewUnpackedHeader->Unused_0_ != 0) ||
        (NewUnpackedHeader->Unused_1_ != 0) ||
        (NewUnpackedHeader->Checksum != Checksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
                                               NewPackedHeader,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed)) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

static bool ScudoInitIsRunning = false;

static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t pkey;

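// Per-thread state: each thread gets its own allocator cache. Once the cache
// has been destroyed by the TLS destructor (ThreadTornDown), any subsequent
// allocation or deallocation on that thread falls back to the global fallback
// caches in the Allocator, which are protected by a mutex.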
static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;

static void teardownThread(void *p) {
  uptr v = reinterpret_cast<uptr>(p);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and destroying the cache.
  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
    pthread_setspecific(pkey, reinterpret_cast<void *>(v + 1));
    return;
  }
  drainQuarantine();
  getAllocator().DestroyCache(&Cache);
  ThreadTornDown = true;
}

static void initInternal() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initAllocator(Options);

  ScudoInitIsRunning = false;
}

static void initGlobal() {
  pthread_key_create(&pkey, teardownThread);
  initInternal();
}

static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
  pthread_setspecific(pkey, reinterpret_cast<void *>(1));
  getAllocator().InitCache(&Cache);
  ThreadInited = true;
}

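// Callback interface handed to the quarantine: Recycle is invoked when a
// quarantined chunk is evicted and can finally be returned to the backend,
// while Allocate/Deallocate serve the quarantine's own internal batch
// structures.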
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (Header.State != ChunkQuarantine) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    void *Ptr = Chunk->AllocBeg(&Header);
    getAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // The internal quarantine memory cannot be protected by us. But the only
    // structures allocated are QuarantineBatches, which are 8KB for x64. So we
    // will use mmap for those, and given that Deallocate doesn't pass a size
    // in, we enforce the size of the allocation to be sizeof(QuarantineBatch).
    // TODO(kostyak): switching to mmap greatly impacts performance, we have
    // to find another solution.
    // CHECK_EQ(Size, sizeof(QuarantineBatch));
    // return MmapOrDie(Size, "QuarantineBatch");
    return getAllocator().Allocate(Cache_, Size, 1, false);
  }

  void Deallocate(void *Ptr) {
    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
    getAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache QuarantineCache;
static thread_local QuarantineCache ThreadQuarantineCache;

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

struct Allocator {
  static const uptr MaxAllowedMallocSize = 1ULL << 40;
  static const uptr MinAlignment = 1 << MinAlignmentLog;
  static const uptr MaxAlignment = 1 << MaxAlignmentLog; // 16 MB

  ScudoAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they can
  // be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  QuarantineCache FallbackQuarantineCache;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit Allocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Currently SSE 4.2 support is required. This might change later.
    CHECK(testCPUFeature(SSE4_2)); // for crc32

    // Verify that the header offset field can hold the maximum offset. In the
    // worst case scenario, the backend allocation is already aligned on
    // MaxAlignment, so in order to store the header and still be aligned, we
    // add an extra MaxAlignment. As a result, the offset from the beginning of
    // the backend allocation to the chunk will be MaxAlignment -
    // ChunkHeaderSize.
    UnpackedHeader Header = {};
    uptr MaximumOffset = (MaxAlignment - ChunkHeaderSize) >> MinAlignmentLog;
    Header.Offset = MaximumOffset;
    if (Header.Offset != MaximumOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull);
    AllocatorQuarantine.Init(static_cast<uptr>(Options.QuarantineSizeMb) << 20,
                             static_cast<uptr>(
                                 Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    Cookie = Prng.Next();
  }

  // Allocates a chunk.
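  // The backend allocation is laid out as follows: the 16-byte header sits
  // immediately before the pointer returned to the user (ChunkBeg), which is
  // aligned on the requested alignment; anything between the start of the
  // backend allocation (AllocBeg) and the header is alignment slack, recorded
  // in the header's Offset field (in MinAlignment units) so that AllocBeg can
  // be recovered at deallocation time without asking the backend.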
  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!IsPowerOfTwo(Alignment)) {
      dieWithMessage("ERROR: malloc alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDie();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDie();
    uptr RoundedSize = RoundUpTo(Size, MinAlignment);
    uptr ExtraBytes = ChunkHeaderSize;
    if (Alignment > MinAlignment)
      ExtraBytes += Alignment;
    uptr NeededSize = RoundedSize + ExtraBytes;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDie();

    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize, MinAlignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      MinAlignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDie();

    // If requested, we will zero out the entire contents of the returned chunk.
    if (ZeroContents && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));

    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    uptr ChunkBeg = AllocBeg + ChunkHeaderSize;
    if (!IsAligned(ChunkBeg, Alignment))
      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
    Header.Offset = (ChunkBeg - ChunkHeaderSize - AllocBeg) >> MinAlignmentLog;
    Header.AllocType = Type;
    Header.RequestedSize = Size;
    Header.Salt = static_cast<u16>(Prng.Next());
    Chunk->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
    // TODO(kostyak): hooks sound like a terrible idea security-wise but might
    // be needed for things to work properly?
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
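  // The state transition from ChunkAllocated to ChunkQuarantine is done with a
  // checksummed compare-and-swap, so a double-free or a free of a chunk whose
  // header has been tampered with aborts before anything reaches the
  // quarantine; the memory itself is only recycled once the quarantine evicts
  // the chunk.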
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // TODO(kostyak): see hook comment above
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (NewHeader.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         Chunk);
        }
      }
    }
    uptr Size = NewHeader.RequestedSize;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Chunk);
      }
    }
    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, Size);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, Size);
    }
  }

  // Returns the actual usable size of a chunk. Since this requires loading the
  // header, we will return it in the second parameter, as it can be required
  // by the caller to perform additional processing.
  uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!Ptr)
      return 0;
    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    Chunk->loadHeader(Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (Header->State != ChunkAllocated) {
      dieWithMessage("ERROR: attempted to size a non-allocated chunk at "
                     "address %p\n", Chunk);
    }
    uptr Size =
        BackendAllocator.GetActuallyAllocatedSize(Chunk->AllocBeg(Header));
    // UsableSize works as malloc_usable_size, which is also what (AFAIU)
    // tcmalloc's MallocExtension::GetAllocatedSize aims at providing. This
    // means we will return the size of the chunk from the user beginning to
    // the end of the 'user' allocation, hence the subtraction of the header
    // size and the offset from the size.
    if (Size == 0)
      return Size;
    return Size - ChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Helper function that doesn't care about the header.
  uptr getUsableSize(const void *Ptr) {
    UnpackedHeader Header;
    return getUsableSize(Ptr, &Header);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    UnpackedHeader OldHeader;
    uptr Size = getUsableSize(OldPtr, &OldHeader);
    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    if (OldHeader.AllocType != FromMalloc) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk.
    if (NewSize <= Size) {
      NewHeader.RequestedSize = NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.RequestedSize;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, OldSize);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, OldSize);
      }
    }
    return NewPtr;
  }

  void *calloc(uptr NMemB, uptr Size) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB) // Overflow check
      return BackendAllocator.ReturnNullOrDie();
    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
    // If ZeroContents, the content of the chunk has already been zero'd out.
    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, getUsableSize(Ptr));
    return Ptr;
  }

  void drainQuarantine() {
    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache));
  }
};

static Allocator Instance(LINKER_INITIALIZED);

static ScudoAllocator &getAllocator() {
  return Instance.BackendAllocator;
}

void initAllocator(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void drainQuarantine() {
  Instance.drainQuarantine();
}

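// C-level entry points. These are not meant to be called directly by user
// code; a reasonable assumption is that the standard malloc/free/new/delete
// interfaces are routed here by the Scudo wrappers. A hypothetical direct use
// would look like:
//   void *P = scudoMemalign(64, 1024);      // 64-byte aligned, 1024 bytes
//   uptr Usable = scudoMallocUsableSize(P); // >= 1024
//   scudoFree(P, FromMemalign);             // type checked if the
//                                           // DeallocationTypeMismatch
//                                           // option is set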
void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, Allocator::MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, Allocator::MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // size must be a multiple of the alignment. To avoid a division, we first
  // make sure that alignment is a power of 2.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

} // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatMapped];
}

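// Free and unmapped byte counts are not tracked by the combined backend
// statistics, hence the placeholder values below.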
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return Instance.getUsableSize(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return Instance.getUsableSize(p);
}
641}