//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>

#include <cstring>

namespace __scudo {

#if SANITIZER_CAN_USE_ALLOCATOR64
const uptr AllocatorSpace = ~0ULL;
const uptr AllocatorSize = 0x40000000000ULL;
typedef DefaultSizeClassMap SizeClassMap;
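// Parameters for the 64-bit primary (SizeClassAllocator64). Note that
// kRandomShuffleChunks asks the backend to hand out the chunks of a size
// class in a randomized order, making the heap layout harder to predict.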
struct AP {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP> PrimaryAllocator;
#else
// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
// security improvements brought to the 64-bit one. This makes the 32-bit
// version of Scudo slightly less toughened.
static const uptr RegionSizeLog = 20;
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
typedef SizeClassMap<3, 4, 8, 16, 64, 14> SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
    RegionSizeLog, ByteMap> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

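// The thread-local allocator cache, the secondary (mmap based) allocator used
// for larger sizes, and the primary selected above are combined into the
// backend allocator that Scudo layers its hardened chunk handling on top of.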
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
  ScudoAllocator;

static ScudoAllocator &getAllocator();

static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static uptr Cookie;

enum : u8 {
  CRC32Software = 0,
  CRC32Hardware = 1,
};
// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

// Hardware CRC32 is supported at compilation via the following:
// - for i386 & x86_64: -msse4.2
// - for ARM & AArch64: -march=armv8-a+crc
// An additional check must be performed at runtime as well to make sure the
// emitted instructions are valid on the target host.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
# ifdef __SSE4_2__
#  include <smmintrin.h>
#  define HW_CRC32 FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
# endif
# ifdef __ARM_FEATURE_CRC32
#  include <arm_acle.h>
#  define HW_CRC32 FIRST_32_SECOND_64(__crc32cw, __crc32cd)
# endif
#endif

// Helper function that computes the chunk checksum, being passed all the
// needed information as uptrs. It will opt for the hardware version of
// the checksumming function if available.
INLINE u32 hashUptrs(uptr Pointer, uptr *Array, uptr ArraySize, u8 HashType) {
  u32 Crc;
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  if (HashType == CRC32Hardware) {
    Crc = HW_CRC32(Cookie, Pointer);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = HW_CRC32(Crc, Array[i]);
    return Crc;
  }
#endif
  Crc = computeCRC32(Cookie, Pointer);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeCRC32(Crc, Array[i]);
  return Crc;
}

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered with.
  // To prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the amount of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    uptr Size = getAllocator().GetActuallyAllocatedSize(getAllocBeg(Header));
    if (Size == 0)
      return Size;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the Chunk pointer and its ChunkHeader.
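  // The checksum covers the header (with its Checksum field zeroed out), the
  // address of the chunk, and the global Cookie, so a header that has been
  // corrupted, forged, or copied to another address will fail validation.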
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u32 Hash = hashUptrs(reinterpret_cast<uptr>(this),
                         HeaderHolder,
                         ARRAY_SIZE(HeaderHolder),
                         atomic_load_relaxed(&HashAlgorithm));
    return static_cast<u16>(Hash);
  }

  // Checks the validity of a chunk by verifying its checksum.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader)) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
                                               NewPackedHeader,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed)) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

static bool ScudoInitIsRunning = false;

static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t PThreadKey;

static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;

static void teardownThread(void *p) {
  uptr v = reinterpret_cast<uptr>(p);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
    pthread_setspecific(PThreadKey, reinterpret_cast<void *>(v + 1));
    return;
  }
  drainQuarantine();
  getAllocator().DestroyCache(&Cache);
  ThreadTornDown = true;
}

static void initInternal() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if SSE4.2 is supported and, if so, opt for the hardware CRC32
  // version.
  if (testCPUFeature(CRC32CPUFeature)) {
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
  }

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initAllocator(Options);

  MaybeStartBackgroudThread();

  ScudoInitIsRunning = false;
}

static void initGlobal() {
  pthread_key_create(&PThreadKey, teardownThread);
  initInternal();
}

static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
  pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
  getAllocator().InitCache(&Cache);
  ThreadInited = true;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (Header.State != ChunkQuarantine) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    void *Ptr = Chunk->getAllocBeg(&Header);
    getAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // The internal quarantine memory cannot be protected by us. But the only
    // structures allocated are QuarantineBatch, which are about 8KB on x64.
    // So we will use mmap for those, and given that Deallocate doesn't pass a
    // size in, we enforce the size of the allocation to be
    // sizeof(QuarantineBatch).
    // TODO(kostyak): switching to mmap greatly impacts performance, we have
    // to find another solution.
    // CHECK_EQ(Size, sizeof(QuarantineBatch));
    // return MmapOrDie(Size, "QuarantineBatch");
    return getAllocator().Allocate(Cache_, Size, 1, false);
  }

  void Deallocate(void *Ptr) {
    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
    getAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache QuarantineCache;
static thread_local QuarantineCache ThreadQuarantineCache;

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

struct Allocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  ScudoAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they can
  // be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  QuarantineCache FallbackQuarantineCache;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit Allocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
        SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
        MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum amount of unused bytes in the header.
    // The worst case scenario would be when allocating 1 byte on a MaxAlignment
    // alignment. Since the combined allocator currently rounds the size up to
    // the alignment before passing it to the secondary, we end up with
    // MaxAlignment - 1 extra bytes.
    uptr MaxUnusedBytes = MaxAlignment - 1;
    Header.UnusedBytes = MaxUnusedBytes;
    if (Header.UnusedBytes != MaxUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    Cookie = Prng.Next();
  }

  // Helper function that checks for a valid Scudo chunk.
  bool isValidPointer(const void *UserPtr) {
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      return false;
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    return Chunk->isValid();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!IsPowerOfTwo(Alignment)) {
      dieWithMessage("ERROR: alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    uptr RoundedSize = RoundUpTo(Size, MinAlignment);
    uptr NeededSize = RoundedSize + AlignedChunkHeaderSize;
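    // With a stricter alignment requested, over-allocate by Alignment bytes so
    // that the user pointer can be rounded up to the requested alignment while
    // leaving room for the header right in front of it.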
    if (Alignment > MinAlignment)
      NeededSize += Alignment;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);

    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDieOnOOM();

    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    // If the allocation was serviced by the secondary, the returned pointer
    // accounts for ChunkHeaderSize to pass the alignment check of the combined
    // allocator. Adjust it here.
    if (!FromPrimary)
      AllocBeg -= AlignedChunkHeaderSize;

    uptr ActuallyAllocatedSize = BackendAllocator.GetActuallyAllocatedSize(
        reinterpret_cast<void *>(AllocBeg));
    // If requested, we will zero out the entire contents of the returned chunk.
    if (ZeroContents && FromPrimary)
      memset(Ptr, 0, ActuallyAllocatedSize);

    uptr ChunkBeg = AllocBeg + AlignedChunkHeaderSize;
    if (!IsAligned(ChunkBeg, Alignment))
      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
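    // The offset is the distance (in multiples of MinAlignment) between the
    // beginning of the backend allocation and the header. It allows
    // getAllocBeg() to recover the original backend pointer on deallocation,
    // even when the user pointer had to be shifted up for alignment.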
    uptr Offset = ChunkBeg - AlignedChunkHeaderSize - AllocBeg;
    Header.Offset = Offset >> MinAlignmentLog;
    Header.AllocType = Type;
    Header.UnusedBytes = ActuallyAllocatedSize - Offset -
        AlignedChunkHeaderSize - Size;
    Header.Salt = static_cast<u8>(Prng.Next());
    Chunk->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
    // TODO(kostyak): hooks sound like a terrible idea security wise but might
    // be needed for things to work properly?
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // TODO(kostyak): see hook comment above
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
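    // Transition the chunk to the quarantined state with a compare-exchange on
    // the packed header: if another thread raced us (e.g. a concurrent free of
    // the same pointer) or the header changed under our feet, the exchange
    // fails and the process is terminated.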
    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (NewHeader.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         Chunk);
        }
      }
    }
    uptr Size = UsableSize - OldHeader.UnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Chunk);
      }
    }

    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, UsableSize);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, UsableSize);
    }
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    uptr Size = Chunk->getUsableSize(&OldHeader);
    if (OldHeader.AllocType != FromMalloc) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk.
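    // Only the UnusedBytes field needs updating; re-storing the header also
    // refreshes its checksum.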
    if (NewSize <= Size) {
      NewHeader.UnusedBytes = Size - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = Size - OldHeader.UnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, Size);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, Size);
      }
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!Ptr)
      return 0;
    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (Header.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB)  // Overflow check
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
    // If ZeroContents, the content of the chunk has already been zero'd out.
    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, getUsableSize(Ptr));
    return Ptr;
  }

  void drainQuarantine() {
    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache));
  }
};

static Allocator Instance(LINKER_INITIALIZED);

static ScudoAllocator &getAllocator() {
  return Instance.BackendAllocator;
}

void initAllocator(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void drainQuarantine() {
  Instance.drainQuarantine();
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // Size must be a multiple of the alignment. To avoid a division, we first
  // make sure that Alignment is a power of 2.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatMapped];
}

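// Free and unmapped byte counts are not currently tracked by Scudo, so the
// two functions below return placeholder values.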
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}