//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_flags.h"
#include "scudo_tls.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static uptr Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other
  // hardware-specific instructions were likely emitted at other places, and
  // as a result there is no reason not to use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

static ScudoBackendAllocator &getBackendAllocator();

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered with.
  // To prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend-allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    uptr Size =
        getBackendAllocator().getActuallyAllocatedSize(getAllocBeg(Header),
                                                       Header->FromPrimary);
    if (Size == 0)
      return 0;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the Chunk pointer and its ChunkHeader.
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u32 Crc = computeCRC32(static_cast<u32>(Cookie),
                           reinterpret_cast<uptr>(this), HeaderHolder,
                           ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // terminate the process in the event of an invalid chunk.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  void eraseHeader() {
    PackedHeader NullPackedHeader = 0;
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader,
                                                 &OldPackedHeader,
                                                 NewPackedHeader,
                                                 memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

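// Returns the ScudoChunk whose header immediately precedes the user data,
// AlignedChunkHeaderSize bytes before UserBeg.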
ScudoChunk *getScudoChunk(uptr UserBeg) {
  return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
}

struct AllocatorOptions {
  u32 QuarantineSizeKb;
  u32 ThreadLocalQuarantineSizeKb;
  u32 QuarantineChunksUpToSize;
  bool MayReturnNull;
  s32 ReleaseToOSIntervalMs;
  bool DeallocationTypeMismatch;
  bool DeleteSizeMismatch;
  bool ZeroContents;

  void setFrom(const Flags *f, const CommonFlags *cf);
};

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeKb = f->QuarantineSizeKb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  QuarantineChunksUpToSize = f->QuarantineChunksUpToSize;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

static void initScudoInternal(const AllocatorOptions &Options);

static bool ScudoInitIsRunning = false;

void initScudo() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if hardware CRC32 is supported in the binary and by the platform;
  // if so, opt for the hardware CRC32 version of the checksum.
  if (computeHardwareCRC32 && testCPUFeature(CRC32CPUFeature))
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initScudoInternal(Options);

  // TODO(kostyak): determine if MaybeStartBackgroudThread could be of some use.

  ScudoInitIsRunning = false;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    Chunk->eraseHeader();
    void *Ptr = Chunk->getAllocBeg(&Header);
    if (Header.FromPrimary)
      getBackendAllocator().deallocatePrimary(Cache_, Ptr);
    else
      getBackendAllocator().deallocateSecondary(Ptr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
  void *Allocate(uptr Size) {
    return getBackendAllocator().allocatePrimary(Cache_, Size);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().deallocatePrimary(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
               sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));

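// Accessors for the allocator cache, quarantine cache and PRNG held in a
// thread's ScudoThreadContext.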
AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Cache;
}

ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
  return reinterpret_cast<
      ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
}

ScudoPrng *getPrng(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Prng;
}

struct ScudoAllocator {
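  // Hard cap on allocation sizes: 2 GB on 32-bit platforms, 1 TB on 64-bit.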
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  typedef ReturnNullOrDieOnFailure FailureHandler;

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  StaticSpinMutex GlobalPrngMutex;
  ScudoPrng GlobalPrng;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they
  // can be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  ScudoQuarantineCache FallbackQuarantineCache;
  ScudoPrng FallbackPrng;

  u32 QuarantineChunksUpToSize;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit ScudoAllocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment =
        1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset =
        (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    SetAllocatorMayReturnNull(Options.MayReturnNull);
    BackendAllocator.init(Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeKb) << 10,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = Options.QuarantineChunksUpToSize;
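    // Seed the global PRNG and draw the random cookie that gets mixed into
    // every chunk header checksum.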
    GlobalPrng.init();
    Cookie = GlobalPrng.getU64();
    BackendAllocator.initCache(&FallbackAllocatorCache);
    FallbackPrng.init();
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
  bool isValidPointer(const void *UserPtr) {
    initThreadMaybe();
    if (UNLIKELY(!UserPtr))
      return false;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(UserBeg, MinAlignment))
      return false;
    return getScudoChunk(UserBeg)->isValid();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(Alignment > MaxAlignment))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;
    if (UNLIKELY(Size >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Size == 0))
      Size = 1;

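    // NeededSize covers the rounded-up user size plus the header; when a
    // larger-than-minimum alignment is requested, reserve enough extra bytes
    // to be able to round the user pointer up to that alignment.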
    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
    if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);

    void *Ptr;
    u8 Salt;
    uptr AllocSize;
    if (FromPrimary) {
      AllocSize = AlignedSize;
      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
      if (LIKELY(ThreadContext)) {
        Salt = getPrng(ThreadContext)->getU8();
        Ptr = BackendAllocator.allocatePrimary(getAllocatorCache(ThreadContext),
                                               AllocSize);
        ThreadContext->unlock();
      } else {
        SpinMutexLock l(&FallbackMutex);
        Salt = FallbackPrng.getU8();
        Ptr = BackendAllocator.allocatePrimary(&FallbackAllocatorCache,
                                               AllocSize);
      }
    } else {
      {
        SpinMutexLock l(&GlobalPrngMutex);
        Salt = GlobalPrng.getU8();
      }
      AllocSize = NeededSize;
      Ptr = BackendAllocator.allocateSecondary(AllocSize, Alignment);
    }
    if (UNLIKELY(!Ptr))
      return FailureHandler::OnOOM();

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && FromPrimary)
      memset(Ptr, 0, BackendAllocator.getActuallyAllocatedSize(
          Ptr, /*FromPrimary=*/true));

    UnpackedHeader Header = {};
    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
    if (UNLIKELY(!IsAligned(UserBeg, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the offset
      // field of the header would be non-zero.
      CHECK(FromPrimary);
      UserBeg = RoundUpTo(UserBeg, Alignment);
      uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
      Header.Offset = Offset >> MinAlignmentLog;
    }
    CHECK_LE(UserBeg + Size, AllocBeg + AllocSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (FromPrimary) {
      Header.FromPrimary = 1;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and the
      // next page boundary.
      uptr PageSize = GetPageSizeCached();
      uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    Header.Salt = Salt;
    getScudoChunk(UserBeg)->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(UserBeg);
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Places a chunk in the quarantine, or deallocates it directly if the
  // quarantine is zero-sized or if the size of the chunk is greater than the
  // quarantine chunk size threshold.
  void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                   uptr Size) {
    const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) ||
        (Size > QuarantineChunksUpToSize);
    if (BypassQuarantine) {
      Chunk->eraseHeader();
      void *Ptr = Chunk->getAllocBeg(Header);
      if (Header->FromPrimary) {
        ScudoThreadContext *ThreadContext = getThreadContextAndLock();
        if (LIKELY(ThreadContext)) {
          getBackendAllocator().deallocatePrimary(
              getAllocatorCache(ThreadContext), Ptr);
          ThreadContext->unlock();
        } else {
          SpinMutexLock Lock(&FallbackMutex);
          getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr);
        }
      } else {
        getBackendAllocator().deallocateSecondary(Ptr);
      }
    } else {
      // If a small amount of memory was allocated with a larger alignment, we
      // want to take that into account. Otherwise the Quarantine would be
      // filled with tiny chunks, taking up a lot of VA memory. This
      // approximation of the usable size allows us to not have to call
      // getActuallyAllocatedSize.
      uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, Header);
      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
      if (LIKELY(ThreadContext)) {
        AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
                                QuarantineCallback(
                                    getAllocatorCache(ThreadContext)),
                                Chunk, EstimatedSize);
        ThreadContext->unlock();
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, EstimatedSize);
      }
    }
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, ending up in initialized thread-specific data never
    // being destroyed properly. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (UNLIKELY(!UserPtr))
      return;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch when deallocating "
                         "address %p\n", UserPtr);
        }
      }
    }
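    // For Primary backed chunks, SizeOrUnusedBytes holds the requested size;
    // for Secondary backed ones it holds the unused trailing bytes, so the
    // size is recovered from the usable size.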
    uptr Size = Header.FromPrimary ? Header.SizeOrUnusedBytes :
        Chunk->getUsableSize(&Header) - Header.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       UserPtr);
      }
    }
    quarantineOrDeallocateChunk(Chunk, &Header, Size);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (DeallocationTypeMismatch) {
      if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
        dieWithMessage("ERROR: allocation type mismatch when reallocating "
                       "address %p\n", OldPtr);
      }
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
      quarantineOrDeallocateChunk(Chunk, &OldHeader, OldSize);
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;
    uptr UserBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

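  // Allocates a zeroed block for an array of NMemB elements of Size bytes,
  // checking the multiplication for overflow first.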
  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size)))
      return FailureHandler::OnBadRequest();
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

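  // Drains the quarantine cache of an exiting thread and releases its
  // allocator cache back to the backend.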
  void commitBack(ScudoThreadContext *ThreadContext) {
    AllocatorCache *Cache = getAllocatorCache(ThreadContext);
    AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
                              QuarantineCallback(Cache));
    BackendAllocator.destroyCache(Cache);
  }

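  // Returns the requested statistic (allocated or mapped bytes) as gathered
  // from the backend.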
  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    BackendAllocator.getStats(stats);
    return stats[StatType];
  }
};

static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

static void initScudoInternal(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void ScudoThreadContext::init() {
  getBackendAllocator().initCache(&Cache);
  Prng.init();
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoThreadContext::commitBack() {
  Instance.commitBack(this);
}

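// Interface functions wrapping the global allocator instance. SetErrnoOnNull
// sets errno to ENOMEM when the returned pointer is null.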
void *scudoMalloc(uptr Size, AllocType Type) {
  return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type));
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = errno_ENOMEM;
    return ScudoAllocator::FailureHandler::OnBadRequest();
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = errno_EINVAL;
    return ScudoAllocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    ScudoAllocator::FailureHandler::OnBadRequest();
    return errno_EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return errno_ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = errno_EINVAL;
    return ScudoAllocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

} // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}
Kostya Serebryany712fc982016-06-07 01:20:26 +0000751}