//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>
#include <smmintrin.h>

#include <cstring>

namespace __scudo {

struct AP {
  static const uptr kSpaceBeg = ~0ULL;
  static const uptr kSpaceSize = 0x10000000000ULL;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};

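// The Primary backend (SizeClassAllocator64, parameterized by AP above) serves
// size-classed allocations out of a dedicated region, going through a
// per-thread AllocatorCache; the Secondary falls back to mmap-based
// allocations for larger sizes. CombinedAllocator dispatches between the two.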
typedef SizeClassAllocator64<AP> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
  ScudoAllocator;

static ScudoAllocator &getAllocator();

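// Per-thread PRNG used for the 16-bit salt stored in each chunk header; the
// global Cookie below is drawn from it at init time and folded into every
// chunk checksum.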
static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static u64 Cookie;

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with. To
  // prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // CRC32 checksum of the Chunk pointer and its ChunkHeader.
  // It currently uses the Intel Nehalem SSE4.2 crc32 64-bit instruction.
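  // Folding the global Cookie and the chunk address into the checksum means
  // that a header copied verbatim from another chunk (or from another run of
  // the process) will not verify at this address.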
  u16 computeChecksum(UnpackedHeader *Header) const {
    u64 HeaderHolder[2];
    memcpy(HeaderHolder, Header, sizeof(HeaderHolder));
    u64 Crc = _mm_crc32_u64(Cookie, reinterpret_cast<uptr>(this));
    // This is somewhat of a shortcut. The checksum is stored in the 16 least
    // significant bits of the first 8 bytes of the header, hence the zeroing
    // of those bits. It would be more correct to zero the checksum field of
    // the UnpackedHeader, but that would require holding an additional copy
    // of it.
    Crc = _mm_crc32_u64(Crc, HeaderHolder[0] & 0xffffffffffff0000ULL);
    Crc = _mm_crc32_u64(Crc, HeaderHolder[1]);
    return static_cast<u16>(Crc);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if ((NewUnpackedHeader->Unused_0_ != 0) ||
        (NewUnpackedHeader->Unused_1_ != 0) ||
        (NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
                                               NewPackedHeader,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed)) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};
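
// A rough sketch of the chunk layout as set up by Allocator::allocate() below.
// The alignment padding is only non-empty for over-aligned requests serviced
// by the Primary, and its length (in MinAlignment units) is what the header
// Offset field records:
//
//   AllocBeg          ScudoChunk ("this")    ChunkBeg (user pointer)
//   |                 |                      |
//   +-----------------+----------------------+---------------------------+
//   | alignment pad   | packed chunk header  | user data (RequestedSize) |
//   +-----------------+----------------------+---------------------------+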

static bool ScudoInitIsRunning = false;

static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t pkey;

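// Per-thread state: an initialization flag, a tear-down flag, and the local
// backend allocator cache. The pthread key above is only used to have
// teardownThread() invoked when a thread exits.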
static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;

static void teardownThread(void *p) {
  uptr v = reinterpret_cast<uptr>(p);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS. We
  // want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
    pthread_setspecific(pkey, reinterpret_cast<void *>(v + 1));
    return;
  }
  drainQuarantine();
  getAllocator().DestroyCache(&Cache);
  ThreadTornDown = true;
}

static void initInternal() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initAllocator(Options);

  MaybeStartBackgroudThread();

  ScudoInitIsRunning = false;
}

static void initGlobal() {
  pthread_key_create(&pkey, teardownThread);
  initInternal();
}

static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
  pthread_setspecific(pkey, reinterpret_cast<void *>(1));
  getAllocator().InitCache(&Cache);
  ThreadInited = true;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (Header.State != ChunkQuarantine) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    void *Ptr = Chunk->getAllocBeg(&Header);
    getAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // The internal quarantine memory cannot be protected by us. But the only
    // structures allocated are QuarantineBatch, which are 8KB on x64. We could
    // use mmap for those, and since Deallocate doesn't pass a size in, we
    // would enforce the size of the allocation to be sizeof(QuarantineBatch).
    // TODO(kostyak): switching to mmap greatly impacts performance, we have
    // to find another solution.
    // CHECK_EQ(Size, sizeof(QuarantineBatch));
    // return MmapOrDie(Size, "QuarantineBatch");
    return getAllocator().Allocate(Cache_, Size, 1, false);
  }

  void Deallocate(void *Ptr) {
    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
    getAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

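// The quarantine is the delayed free list: freed chunks are first put in a
// per-thread cache, then drained into a global pool, and only recycled back
// to the backend once the size limits from AllocatorOptions are exceeded.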
typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache QuarantineCache;
static thread_local QuarantineCache ThreadQuarantineCache;

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

struct Allocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  ScudoAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread-local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they
  // can be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  QuarantineCache FallbackQuarantineCache;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit Allocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Currently SSE 4.2 support is required. This might change later.
    CHECK(testCPUFeature(SSE4_2)); // for crc32

    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
        PrimaryAllocator::SizeClassMap::kMaxSize - MinAlignment);
    uptr MaximumOffset = (MaxPrimaryAlignment - ChunkHeaderSize) >>
        MinAlignmentLog;
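    // Writing MaximumOffset into the Offset bit-field and reading it back is
    // a cheap way to detect truncation: if the field is too narrow to hold
    // the worst-case offset, the two values differ and we abort.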
    Header.Offset = MaximumOffset;
    if (Header.Offset != MaximumOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    Cookie = Prng.Next();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!IsPowerOfTwo(Alignment)) {
      dieWithMessage("ERROR: alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    uptr RoundedSize = RoundUpTo(Size, MinAlignment);
    uptr NeededSize = RoundedSize + ChunkHeaderSize;
    if (Alignment > MinAlignment)
      NeededSize += Alignment;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);

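    // For Primary-backed allocations, we only request MinAlignment from the
    // backend and satisfy larger alignments ourselves, using the extra
    // Alignment bytes added to NeededSize above and recording the shift in
    // the header Offset. The Secondary takes care of the requested alignment
    // itself.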
    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDieOnOOM();

    // If requested, we will zero out the entire contents of the returned chunk.
    if (ZeroContents && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));

    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    // If the allocation was serviced by the secondary, the returned pointer
    // accounts for ChunkHeaderSize to pass the alignment check of the combined
    // allocator. Adjust it here.
    if (!FromPrimary)
      AllocBeg -= ChunkHeaderSize;
    uptr ChunkBeg = AllocBeg + ChunkHeaderSize;
    if (!IsAligned(ChunkBeg, Alignment))
      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
    Header.Offset = (ChunkBeg - ChunkHeaderSize - AllocBeg) >> MinAlignmentLog;
    Header.AllocType = Type;
    Header.RequestedSize = Size;
    Header.Salt = static_cast<u16>(Prng.Next());
    Chunk->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
    // TODO(kostyak): hooks sound like a terrible idea security-wise, but
    // might be needed for things to work properly?
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // TODO(kostyak): see hook comment above
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (NewHeader.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         Chunk);
        }
      }
    }
    uptr Size = NewHeader.RequestedSize;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Chunk);
      }
    }
    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, Size);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, Size);
    }
  }

  // Returns the actual usable size of a chunk. Since this requires loading the
  // header, we will return it in the second parameter, as it can be required
  // by the caller to perform additional processing.
  uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!Ptr)
      return 0;
    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    Chunk->loadHeader(Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (Header->State != ChunkAllocated) {
      dieWithMessage("ERROR: attempted to size a non-allocated chunk at "
                     "address %p\n", Chunk);
    }
    uptr Size =
        BackendAllocator.GetActuallyAllocatedSize(Chunk->getAllocBeg(Header));
    // UsableSize works like malloc_usable_size, which is also what (AFAIU)
    // tcmalloc's MallocExtension::GetAllocatedSize aims at providing. This
    // means we return the size of the chunk from the beginning of the user
    // data to the end of the 'user' allocation, hence the subtraction of the
    // header size and the offset from the backend size.
    if (Size == 0)
      return Size;
    return Size - ChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Helper function that doesn't care about the header.
  uptr getUsableSize(const void *Ptr) {
    UnpackedHeader Header;
    return getUsableSize(Ptr, &Header);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    UnpackedHeader OldHeader;
    uptr Size = getUsableSize(OldPtr, &OldHeader);
    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    if (OldHeader.AllocType != FromMalloc) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk.
    if (NewSize <= Size) {
      NewHeader.RequestedSize = NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.RequestedSize;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, OldSize);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, OldSize);
      }
    }
    return NewPtr;
  }

  void *calloc(uptr NMemB, uptr Size) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB) // Overflow check
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
    // If ZeroContents, the content of the chunk has already been zero'd out.
    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, getUsableSize(Ptr));
    return Ptr;
  }

  void drainQuarantine() {
    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache));
  }
};

static Allocator Instance(LINKER_INITIALIZED);

static ScudoAllocator &getAllocator() {
  return Instance.BackendAllocator;
}

void initAllocator(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void drainQuarantine() {
  Instance.drainQuarantine();
}

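// Public entry points wrapping the Allocator instance; the libc-facing
// functions (malloc, free, realloc, ...) are implemented on top of these.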
void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // Size must be a multiple of the alignment. To avoid a division, we first
  // make sure that alignment is a power of 2.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

} // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions
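// Note that free and unmapped byte counts are not currently tracked, hence
// the placeholder values returned by the corresponding functions below.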

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return Instance.getUsableSize(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return Instance.getUsableSize(p);
}