//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>
#include <smmintrin.h>

#include <atomic>
#include <cstring>

namespace __scudo {

const uptr MinAlignmentLog = 4; // 16 bytes for x64
const uptr MaxAlignmentLog = 24;

struct AP {
  static const uptr kSpaceBeg = ~0ULL;
  static const uptr kSpaceSize = 0x10000000000ULL;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
};

typedef SizeClassAllocator64<AP> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
  ScudoAllocator;

static ScudoAllocator &getAllocator();

static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static u64 Cookie;

enum ChunkState : u8 {
  ChunkAvailable = 0,
  ChunkAllocated = 1,
  ChunkQuarantine = 2
};

typedef unsigned __int128 PackedHeader;
typedef std::atomic<PackedHeader> AtomicPackedHeader;

// Our header requires 128 bits of storage on x64 (the only platform supported
// as of now), which fits nicely with the alignment requirements.
// Having the offset saves us from using functions such as GetBlockBegin, which
// is fairly costly. Our first implementation used the MetaData as well, which
// offers the advantage of being stored away from the chunk itself, but
// accessing it was costly as well.
// The header will be atomically loaded and stored using the 16-byte primitives
// offered by the platform (likely requires cmpxchg16b support).
struct UnpackedHeader {
  // 1st 8 bytes
  u16 Checksum : 16;
  u64 RequestedSize : 40; // Needed for reallocation purposes.
  u8 State : 2; // available, allocated, or quarantined
  u8 AllocType : 2; // malloc, new, new[], or memalign
  u8 Unused_0_ : 4;
  // 2nd 8 bytes
  u64 Offset : 20; // Offset from the beginning of the backend allocation to
                   // the beginning of the chunk itself, in multiples of
                   // MinAlignment. See comment about its maximum value and
                   // test in init().
  u64 Unused_1_ : 28;
  u16 Salt : 16;
};

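// A quick sketch of the arithmetic behind the layout above: the first word
// packs 16 + 40 + 2 + 2 + 4 = 64 bits and the second 20 + 28 + 16 = 64 bits,
// so the unpacked header occupies exactly the 128 bits of a PackedHeader,
// which the compile-time check below enforces.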
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));

const uptr ChunkHeaderSize = sizeof(PackedHeader);

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with. To
  // prevent this, we work with a local copy of the header.
  void *AllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // CRC32 checksum of the Chunk pointer and its ChunkHeader.
  // It currently uses the Intel Nehalem SSE4.2 crc32 64-bit instruction.
  u16 Checksum(UnpackedHeader *Header) const {
    u64 HeaderHolder[2];
    memcpy(HeaderHolder, Header, sizeof(HeaderHolder));
    u64 Crc = _mm_crc32_u64(Cookie, reinterpret_cast<uptr>(this));
    // This is somewhat of a shortcut. The checksum is stored in the 16 least
    // significant bits of the first 8 bytes of the header, hence zeroing those
    // bits out. It would be more valid to zero the checksum field of the
    // UnpackedHeader, but that would require holding an additional copy of it.
    Crc = _mm_crc32_u64(Crc, HeaderHolder[0] & 0xffffffffffff0000ULL);
    Crc = _mm_crc32_u64(Crc, HeaderHolder[1]);
    return static_cast<u16>(Crc);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if ((NewUnpackedHeader->Unused_0_ != 0) ||
        (NewUnpackedHeader->Unused_1_ != 0) ||
        (NewUnpackedHeader->Checksum != Checksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
                                               NewPackedHeader,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed)) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

static bool ScudoInitIsRunning = false;

static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t pkey;

static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;

static void teardownThread(void *p) {
  uptr v = reinterpret_cast<uptr>(p);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
    pthread_setspecific(pkey, reinterpret_cast<void *>(v + 1));
    return;
  }
  drainQuarantine();
  getAllocator().DestroyCache(&Cache);
  ThreadTornDown = true;
}

static void initInternal() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initAllocator(Options);

  ScudoInitIsRunning = false;
}

static void initGlobal() {
  pthread_key_create(&pkey, teardownThread);
  initInternal();
}

static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
  pthread_setspecific(pkey, reinterpret_cast<void *>(1));
  getAllocator().InitCache(&Cache);
  ThreadInited = true;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (Header.State != ChunkQuarantine) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    void *Ptr = Chunk->AllocBeg(&Header);
    getAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // The internal quarantine memory cannot be protected by us. But the only
    // structures allocated are QuarantineBatch, which are 8KB on x64. So we
    // will use mmap for those, and given that Deallocate doesn't pass a size
    // in, we enforce the size of the allocation to be sizeof(QuarantineBatch).
    // TODO(kostyak): switching to mmap greatly impacts performance; we have
    //                to find another solution.
    // CHECK_EQ(Size, sizeof(QuarantineBatch));
    // return MmapOrDie(Size, "QuarantineBatch");
    return getAllocator().Allocate(Cache_, Size, 1, false);
  }

  void Deallocate(void *Ptr) {
    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
    getAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache QuarantineCache;
static thread_local QuarantineCache ThreadQuarantineCache;

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

struct Allocator {
  static const uptr MaxAllowedMallocSize = 1ULL << 40;
  static const uptr MinAlignment = 1 << MinAlignmentLog;
  static const uptr MaxAlignment = 1 << MaxAlignmentLog; // 16 MB

  ScudoAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they can
  // be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  QuarantineCache FallbackQuarantineCache;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit Allocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Currently SSE 4.2 support is required. This might change later.
    CHECK(testCPUFeature(SSE4_2)); // for crc32

    // Verify that the header offset field can hold the maximum offset. In the
    // worst case scenario, the backend allocation is already aligned on
    // MaxAlignment, so in order to store the header and still be aligned, we
    // add an extra MaxAlignment. As a result, the offset from the beginning of
    // the backend allocation to the chunk will be MaxAlignment -
    // ChunkHeaderSize.
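    // For instance, with MaxAlignment = 1 << 24 and ChunkHeaderSize = 16, the
    // maximum offset is (0x1000000 - 0x10) >> 4 = 0xfffff, which is exactly
    // the largest value representable in the 20-bit Offset field.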
    UnpackedHeader Header = {};
    uptr MaximumOffset = (MaxAlignment - ChunkHeaderSize) >> MinAlignmentLog;
    Header.Offset = MaximumOffset;
    if (Header.Offset != MaximumOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull);
    AllocatorQuarantine.Init(static_cast<uptr>(Options.QuarantineSizeMb) << 20,
                             static_cast<uptr>(
                                 Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    Cookie = Prng.Next();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!IsPowerOfTwo(Alignment)) {
      dieWithMessage("ERROR: malloc alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDie();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDie();
    uptr RoundedSize = RoundUpTo(Size, MinAlignment);
    uptr ExtraBytes = ChunkHeaderSize;
    if (Alignment > MinAlignment)
      ExtraBytes += Alignment;
    uptr NeededSize = RoundedSize + ExtraBytes;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDie();

    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize, MinAlignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      MinAlignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDie();

    // If requested, we will zero out the entire contents of the returned chunk.
    if (ZeroContents && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));

    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    uptr ChunkBeg = AllocBeg + ChunkHeaderSize;
    if (!IsAligned(ChunkBeg, Alignment))
      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
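    // Illustrative example: for a 64-byte alignment request whose backend
    // allocation happens to start 64-byte aligned, ChunkBeg is rounded up from
    // AllocBeg + 16 to AllocBeg + 64, and the Offset stored below ends up
    // being (64 - 16) >> 4 = 3 units of MinAlignment.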
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
    Header.Offset = (ChunkBeg - ChunkHeaderSize - AllocBeg) >> MinAlignmentLog;
    Header.AllocType = Type;
    Header.RequestedSize = Size;
    Header.Salt = static_cast<u16>(Prng.Next());
    Chunk->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
    // TODO(kostyak): hooks sound like a terrible idea security wise but might
    //                be needed for things to work properly?
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // TODO(kostyak): see hook comment above
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (NewHeader.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         Chunk);
        }
      }
    }
    uptr Size = NewHeader.RequestedSize;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Chunk);
      }
    }
    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, Size);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, Size);
    }
  }

  // Returns the actual usable size of a chunk. Since this requires loading the
  // header, we will return it in the second parameter, as it can be required
  // by the caller to perform additional processing.
  uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!Ptr)
      return 0;
    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    Chunk->loadHeader(Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (Header->State != ChunkAllocated) {
      dieWithMessage("ERROR: attempted to size a non-allocated chunk at "
                     "address %p\n", Chunk);
    }
    uptr Size =
        BackendAllocator.GetActuallyAllocatedSize(Chunk->AllocBeg(Header));
    // UsableSize works as malloc_usable_size, which is also what (AFAIU)
    // tcmalloc's MallocExtension::GetAllocatedSize aims at providing. This
    // means we will return the size of the chunk from the beginning of the
    // user data to the end of the 'user' allocation, hence we subtract the
    // header size and the offset from the size.
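    // As an illustrative example: for a backend allocation of 256 bytes with
    // Offset = 3 (i.e. 48 bytes), the returned usable size would be
    // 256 - 16 - 48 = 192 bytes.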
    if (Size == 0)
      return Size;
    return Size - ChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Helper function that doesn't care about the header.
  uptr getUsableSize(const void *Ptr) {
    UnpackedHeader Header;
    return getUsableSize(Ptr, &Header);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    UnpackedHeader OldHeader;
    uptr Size = getUsableSize(OldPtr, &OldHeader);
    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    if (OldHeader.AllocType != FromMalloc) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk.
    if (NewSize <= Size) {
      NewHeader.RequestedSize = NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.RequestedSize;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, OldSize);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, OldSize);
      }
    }
    return NewPtr;
  }

  void *calloc(uptr NMemB, uptr Size) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB) // Overflow check
      return BackendAllocator.ReturnNullOrDie();
    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
    // If ZeroContents, the content of the chunk has already been zero'd out.
    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, getUsableSize(Ptr));
    return Ptr;
  }

  void drainQuarantine() {
    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache));
  }
};

static Allocator Instance(LINKER_INITIALIZED);

static ScudoAllocator &getAllocator() {
  return Instance.BackendAllocator;
}

void initAllocator(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void drainQuarantine() {
  Instance.drainQuarantine();
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, Allocator::MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, Allocator::MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // size must be a multiple of the alignment. To avoid a division, we first
  // make sure that alignment is a power of 2.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

} // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return Instance.getUsableSize(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return Instance.getUsableSize(p);
}