//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>

#include <cstring>

namespace __scudo {

#if SANITIZER_CAN_USE_ALLOCATOR64
const uptr AllocatorSpace = ~0ULL;
const uptr AllocatorSize = 0x40000000000ULL;
typedef DefaultSizeClassMap SizeClassMap;
struct AP {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP> PrimaryAllocator;
#else
// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
// security improvements brought to the 64-bit one. This makes the 32-bit
// version of Scudo slightly less toughened.
static const uptr RegionSizeLog = 20;
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
    RegionSizeLog, ByteMap> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
  ScudoAllocator;

static ScudoAllocator &getAllocator();

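// Thread-local PRNG used to generate the chunk header salts; the global
// cookie below is also drawn from it during initialization.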
static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static uptr Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

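// Hardware CRC32 routine, compiled separately in scudo_crc32.cpp with the
// relevant target feature. It is declared weak so that its presence can be
// tested at runtime below.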
SANITIZER_WEAK_ATTRIBUTE u32 computeHardwareCRC32(u32 Crc, uptr Data);

INLINE u32 computeCRC32(u32 Crc, uptr Data, u8 HashType) {
  // If SSE4.2 is defined here, it was enabled everywhere, as opposed to only
  // for scudo_crc32.cpp. This means that other SSE instructions were likely
  // emitted at other places, and as a result there is no reason to not use
  // the hardware version of the CRC32.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  return computeHardwareCRC32(Crc, Data);
#else
  if (computeHardwareCRC32 && HashType == CRC32Hardware)
    return computeHardwareCRC32(Crc, Data);
  else
    return computeSoftwareCRC32(Crc, Data);
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered with.
  // To prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    uptr Size = getAllocator().GetActuallyAllocatedSize(getAllocBeg(Header));
    if (Size == 0)
      return Size;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the Chunk pointer and its ChunkHeader.
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u8 HashType = atomic_load_relaxed(&HashAlgorithm);
    u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HashType);
    for (uptr i = 0; i < ARRAY_SIZE(HeaderHolder); i++)
      Crc = computeCRC32(Crc, HeaderHolder[i], HashType);
    return static_cast<u16>(Crc);
  }

  // Checks the validity of a chunk by verifying its checksum.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader)) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (!atomic_compare_exchange_strong(AtomicHeader,
                                        &OldPackedHeader,
                                        NewPackedHeader,
                                        memory_order_relaxed)) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

static bool ScudoInitIsRunning = false;

static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t PThreadKey;

static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;

static void teardownThread(void *p) {
  uptr v = reinterpret_cast<uptr>(p);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
    pthread_setspecific(PThreadKey, reinterpret_cast<void *>(v + 1));
    return;
  }
  drainQuarantine();
  getAllocator().DestroyCache(&Cache);
  ThreadTornDown = true;
}

static void initInternal() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if SSE4.2 is supported; if so, opt for the hardware CRC32 version.
  if (testCPUFeature(CRC32CPUFeature)) {
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
  }

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initAllocator(Options);

  MaybeStartBackgroudThread();

  ScudoInitIsRunning = false;
}

static void initGlobal() {
  pthread_key_create(&PThreadKey, teardownThread);
  initInternal();
}

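// Lazily initializes the current thread: sets up the thread-local allocator
// cache, and sets a TSD value so that teardownThread() runs at thread exit.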
static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
  pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
  getAllocator().InitCache(&Cache);
  ThreadInited = true;
}

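// Callbacks used by the Quarantine: they recycle quarantined chunks back to
// the backend, and service the Quarantine's own internal allocations.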
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (Header.State != ChunkQuarantine) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    void *Ptr = Chunk->getAllocBeg(&Header);
    getAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // The internal quarantine memory cannot be protected by us. But the only
    // structures allocated are QuarantineBatch, that are 8KB for x64. So we
    // will use mmap for those, and given that Deallocate doesn't pass a size
    // in, we enforce the size of the allocation to be sizeof(QuarantineBatch).
    // TODO(kostyak): switching to mmap greatly impacts performance; we have
    //                to find another solution.
    // CHECK_EQ(Size, sizeof(QuarantineBatch));
    // return MmapOrDie(Size, "QuarantineBatch");
    return getAllocator().Allocate(Cache_, Size, 1, false);
  }

  void Deallocate(void *Ptr) {
    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
    getAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache QuarantineCache;
static thread_local QuarantineCache ThreadQuarantineCache;

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

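// The allocator frontend: it ties the combined backend allocator together
// with the quarantine and the checksummed chunk headers defined above.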
struct Allocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  ScudoAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they
  // can be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  QuarantineCache FallbackQuarantineCache;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit Allocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
        SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
        MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum amount of unused bytes in the header.
    // Given that the Secondary fits the allocation to a page, the worst case
    // scenario happens in the Primary. It will depend on the second to last
    // and last class sizes, as well as the dynamic base for the Primary. The
    // following is an over-approximation that works for our needs.
    uptr MaxUnusedBytes = SizeClassMap::kMaxSize - 1 - AlignedChunkHeaderSize;
    Header.UnusedBytes = MaxUnusedBytes;
    if (Header.UnusedBytes != MaxUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    Cookie = Prng.Next();
  }

  // Helper function that checks for a valid Scudo chunk.
  bool isValidPointer(const void *UserPtr) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      return false;
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    return Chunk->isValid();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!IsPowerOfTwo(Alignment)) {
      dieWithMessage("ERROR: alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();

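    // The backend allocation needs to hold the rounded-up user size plus the
    // aligned header; with a larger-than-minimal alignment we over-allocate
    // so that the user pointer can be aligned afterwards.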
    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    if (Alignment > MinAlignment)
      NeededSize += Alignment;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();

    // Primary backed and Secondary backed allocations have a different
    // treatment. We deal with alignment requirements of Primary serviced
    // allocations here, but the Secondary will take care of its own alignment
    // needs, which means we also have to work around some limitations of the
    // combined allocator to accommodate the situation.
    bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);

    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDieOnOOM();

    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    // If the allocation was serviced by the secondary, the returned pointer
    // accounts for ChunkHeaderSize to pass the alignment check of the combined
    // allocator. Adjust it here.
    if (!FromPrimary) {
      AllocBeg -= AlignedChunkHeaderSize;
      if (Alignment > MinAlignment)
        NeededSize -= Alignment;
    }

    uptr ActuallyAllocatedSize = BackendAllocator.GetActuallyAllocatedSize(
        reinterpret_cast<void *>(AllocBeg));
    // If requested, we will zero out the entire contents of the returned chunk.
    if (ZeroContents && FromPrimary)
      memset(Ptr, 0, ActuallyAllocatedSize);

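    // The chunk header sits right before the user data; if a larger alignment
    // was requested, round the user pointer up and record the offset back to
    // the start of the backend allocation in the header.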
    uptr ChunkBeg = AllocBeg + AlignedChunkHeaderSize;
    if (!IsAligned(ChunkBeg, Alignment))
      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
    uptr Offset = ChunkBeg - AlignedChunkHeaderSize - AllocBeg;
    Header.Offset = Offset >> MinAlignmentLog;
    Header.AllocType = Type;
    Header.UnusedBytes = ActuallyAllocatedSize - Offset -
        AlignedChunkHeaderSize - Size;
    Header.Salt = static_cast<u8>(Prng.Next());
    Chunk->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
    // TODO(kostyak): hooks sound like a terrible idea security wise but might
    //                be needed for things to work properly?
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // TODO(kostyak): see hook comment above
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (NewHeader.AllocType != Type) {
        // With the exception of memalign'd Chunks, which can still be free'd.
        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         Chunk);
        }
      }
    }
    uptr Size = UsableSize - OldHeader.UnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Chunk);
      }
    }

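    // The chunk is not returned to the backend right away; it goes through
    // the quarantine first, and is recycled later by QuarantineCallback.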
    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, UsableSize);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, UsableSize);
    }
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    uptr Size = Chunk->getUsableSize(&OldHeader);
    if (OldHeader.AllocType != FromMalloc) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk.
    if (NewSize <= Size) {
      NewHeader.UnusedBytes = Size - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = Size - OldHeader.UnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, Size);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, Size);
      }
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!Ptr)
      return 0;
    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (Header.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB)  // Overflow check
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
    // If ZeroContents, the content of the chunk has already been zero'd out.
    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, getUsableSize(Ptr));
    return Ptr;
  }

  void drainQuarantine() {
    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache));
  }

  uptr getStats(AllocatorStat StatType) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr stats[AllocatorStatCount];
    BackendAllocator.GetStats(stats);
    return stats[StatType];
  }
};

static Allocator Instance(LINKER_INITIALIZED);

static ScudoAllocator &getAllocator() {
  return Instance.BackendAllocator;
}

void initAllocator(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void drainQuarantine() {
  Instance.drainQuarantine();
}

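// Scudo-prefixed entry points, forwarding to the allocator Instance.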
void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // size must be a multiple of the alignment. To avoid a division, we first
  // make sure that alignment is a power of 2.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}