//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_tls.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static uptr Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(uptr Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason to not use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

static ScudoBackendAllocator &getBackendAllocator();

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered with
  // in the meantime. To prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
75 uptr getUsableSize(UnpackedHeader *Header) {
Kostya Kortchinsky01a66fc2017-05-11 21:40:45 +000076 uptr Size =
Kostya Kortchinskyb44364d2017-07-13 21:01:19 +000077 getBackendAllocator().getActuallyAllocatedSize(getAllocBeg(Header),
Kostya Kortchinsky01a66fc2017-05-11 21:40:45 +000078 Header->FromPrimary);
Kostya Kortchinsky1148dc52016-11-30 17:32:20 +000079 if (Size == 0)
Kostya Kortchinsky006805d2017-04-20 15:11:00 +000080 return 0;
Kostya Kortchinsky1148dc52016-11-30 17:32:20 +000081 return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
82 }
83
84 // Compute the checksum of the Chunk pointer and its ChunkHeader.
Kostya Kortchinsky71dcc332016-10-26 16:16:58 +000085 u16 computeChecksum(UnpackedHeader *Header) const {
Kostya Kortchinsky1148dc52016-11-30 17:32:20 +000086 UnpackedHeader ZeroChecksumHeader = *Header;
87 ZeroChecksumHeader.Checksum = 0;
88 uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
89 memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
Kostya Kortchinskyb0e96eb2017-05-09 15:12:38 +000090 u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HeaderHolder,
91 ARRAY_SIZE(HeaderHolder));
Kostya Kortchinskyb39dff42017-01-18 17:11:17 +000092 return static_cast<u16>(Crc);
Kostya Kortchinsky1148dc52016-11-30 17:32:20 +000093 }
94
  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // terminate the process in the event of an invalid chunk.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  void eraseHeader() {
    PackedHeader NullPackedHeader = 0;
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader,
                                                 &OldPackedHeader,
                                                 NewPackedHeader,
                                                 memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

ScudoChunk *getScudoChunk(uptr UserBeg) {
  return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
}
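
// A sketch of the chunk layout implied by getAllocBeg() and the Offset field
// above, for a Primary backed allocation that needed extra alignment padding
// (Offset is 0 otherwise, and always 0 for Secondary backed allocations):
//
//   AllocBeg                    UserBeg - AlignedChunkHeaderSize      UserBeg
//   | padding (Offset << MinAlignmentLog bytes) | chunk header | user data ...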

struct AllocatorOptions {
  u32 QuarantineSizeKb;
  u32 ThreadLocalQuarantineSizeKb;
  u32 QuarantineChunksUpToSize;
  bool MayReturnNull;
  s32 ReleaseToOSIntervalMs;
  bool DeallocationTypeMismatch;
  bool DeleteSizeMismatch;
  bool ZeroContents;

  void setFrom(const Flags *f, const CommonFlags *cf);
};

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeKb = f->QuarantineSizeKb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  QuarantineChunksUpToSize = f->QuarantineChunksUpToSize;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}
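
// Note: the Flags consumed by setFrom() are set up by initFlags() and exposed
// via getFlags(); at runtime they can typically be overridden through the
// SCUDO_OPTIONS environment variable (see scudo_flags.cpp).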

static void initScudoInternal(const AllocatorOptions &Options);

static bool ScudoInitIsRunning = false;

void initScudo() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if hardware CRC32 is supported in the binary and by the platform, if
  // so, opt for the CRC32 hardware version of the checksum.
  if (computeHardwareCRC32 && testCPUFeature(CRC32CPUFeature))
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initScudoInternal(Options);

  // TODO(kostyak): determine if MaybeStartBackgroudThread could be of some use.

  ScudoInitIsRunning = false;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    Chunk->eraseHeader();
    void *Ptr = Chunk->getAllocBeg(&Header);
    if (Header.FromPrimary)
      getBackendAllocator().deallocatePrimary(Cache_, Ptr);
    else
      getBackendAllocator().deallocateSecondary(Ptr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
  void *Allocate(uptr Size) {
    return getBackendAllocator().allocatePrimary(Cache_, Size);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().deallocatePrimary(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
               sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));

AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Cache;
}

ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
  return reinterpret_cast<
      ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
}

ScudoPrng *getPrng(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Prng;
}

struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  typedef ReturnNullOrDieOnFailure FailureHandler;

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  StaticSpinMutex GlobalPrngMutex;
  ScudoPrng GlobalPrng;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they
  // can be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  ScudoQuarantineCache FallbackQuarantineCache;
  ScudoPrng FallbackPrng;

  u32 QuarantineChunksUpToSize;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit ScudoAllocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment =
        1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset =
        (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    SetAllocatorMayReturnNull(Options.MayReturnNull);
    BackendAllocator.init(Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeKb) << 10,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = Options.QuarantineChunksUpToSize;
    GlobalPrng.init();
    Cookie = GlobalPrng.getU64();
    BackendAllocator.initCache(&FallbackAllocatorCache);
    FallbackPrng.init();
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
  bool isValidPointer(const void *UserPtr) {
    initThreadMaybe();
    if (UNLIKELY(!UserPtr))
      return false;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(UserBeg, MinAlignment))
      return false;
    return getScudoChunk(UserBeg)->isValid();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(Alignment > MaxAlignment))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;
    if (UNLIKELY(Size >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Size == 0))
      Size = 1;

    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
    if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();

    // Primary and Secondary backed allocations are handled differently. We
    // deal with the alignment requirements of Primary serviced allocations
    // here, but the Secondary takes care of its own alignment needs.
375 bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);
Kostya Serebryany712fc982016-06-07 01:20:26 +0000376
377 void *Ptr;
Kostya Kortchinsky00582562017-07-12 15:29:08 +0000378 u8 Salt;
Kostya Kortchinskyb44364d2017-07-13 21:01:19 +0000379 uptr AllocSize;
380 if (FromPrimary) {
381 AllocSize = AlignedSize;
382 ScudoThreadContext *ThreadContext = getThreadContextAndLock();
383 if (LIKELY(ThreadContext)) {
384 Salt = getPrng(ThreadContext)->getU8();
385 Ptr = BackendAllocator.allocatePrimary(getAllocatorCache(ThreadContext),
386 AllocSize);
387 ThreadContext->unlock();
388 } else {
389 SpinMutexLock l(&FallbackMutex);
390 Salt = FallbackPrng.getU8();
391 Ptr = BackendAllocator.allocatePrimary(&FallbackAllocatorCache,
392 AllocSize);
393 }
Kostya Serebryany712fc982016-06-07 01:20:26 +0000394 } else {
Kostya Kortchinskyb44364d2017-07-13 21:01:19 +0000395 {
396 SpinMutexLock l(&GlobalPrngMutex);
397 Salt = GlobalPrng.getU8();
398 }
399 AllocSize = NeededSize;
400 Ptr = BackendAllocator.allocateSecondary(AllocSize, Alignment);
Kostya Serebryany712fc982016-06-07 01:20:26 +0000401 }
Kostya Kortchinsky0ce49992017-06-29 16:45:20 +0000402 if (UNLIKELY(!Ptr))
Alex Shlyapnikovccab11b2017-06-20 21:23:02 +0000403 return FailureHandler::OnOOM();
Kostya Serebryany712fc982016-06-07 01:20:26 +0000404
Kostya Kortchinsky1148dc52016-11-30 17:32:20 +0000405 // If requested, we will zero out the entire contents of the returned chunk.
Kostya Kortchinsky006805d2017-04-20 15:11:00 +0000406 if ((ForceZeroContents || ZeroContents) && FromPrimary)
Kostya Kortchinskyb44364d2017-07-13 21:01:19 +0000407 memset(Ptr, 0, BackendAllocator.getActuallyAllocatedSize(
408 Ptr, /*FromPrimary=*/true));
Kostya Kortchinsky1148dc52016-11-30 17:32:20 +0000409
Kostya Serebryany712fc982016-06-07 01:20:26 +0000410 UnpackedHeader Header = {};
Kostya Kortchinsky01a66fc2017-05-11 21:40:45 +0000411 uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
412 uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
Kostya Kortchinsky0ce49992017-06-29 16:45:20 +0000413 if (UNLIKELY(!IsAligned(UserBeg, Alignment))) {
Kostya Kortchinsky01a66fc2017-05-11 21:40:45 +0000414 // Since the Secondary takes care of alignment, a non-aligned pointer
415 // means it is from the Primary. It is also the only case where the offset
416 // field of the header would be non-zero.
417 CHECK(FromPrimary);
418 UserBeg = RoundUpTo(UserBeg, Alignment);
419 uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
420 Header.Offset = Offset >> MinAlignmentLog;
421 }
Kostya Kortchinskyb44364d2017-07-13 21:01:19 +0000422 CHECK_LE(UserBeg + Size, AllocBeg + AllocSize);
Kostya Serebryany712fc982016-06-07 01:20:26 +0000423 Header.State = ChunkAllocated;
Kostya Serebryany712fc982016-06-07 01:20:26 +0000424 Header.AllocType = Type;
Kostya Kortchinskyfff8e062017-04-20 18:07:17 +0000425 if (FromPrimary) {
Kostya Kortchinskyb44364d2017-07-13 21:01:19 +0000426 Header.FromPrimary = 1;
Kostya Kortchinskyfff8e062017-04-20 18:07:17 +0000427 Header.SizeOrUnusedBytes = Size;
428 } else {
429 // The secondary fits the allocations to a page, so the amount of unused
430 // bytes is the difference between the end of the user allocation and the
431 // next page boundary.
432 uptr PageSize = GetPageSizeCached();
433 uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1);
434 if (TrailingBytes)
435 Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
436 }
Kostya Kortchinskyb44364d2017-07-13 21:01:19 +0000437 Header.Salt = Salt;
Kostya Kortchinsky006805d2017-04-20 15:11:00 +0000438 getScudoChunk(UserBeg)->storeHeader(&Header);
439 void *UserPtr = reinterpret_cast<void *>(UserBeg);
Kostya Serebryany712fc982016-06-07 01:20:26 +0000440 // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
441 return UserPtr;
442 }
443
Kostya Kortchinsky2d944052017-07-24 15:29:38 +0000444 // Place a chunk in the quarantine or directly deallocate it in the event of
445 // a zero-sized quarantine, or if the size of the chunk is greater than the
446 // quarantine chunk size threshold.
Kostya Kortchinskyf1a54fd2017-04-21 18:10:53 +0000447 void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
448 uptr Size) {
Kostya Kortchinsky2d944052017-07-24 15:29:38 +0000449 const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) ||
450 (Size > QuarantineChunksUpToSize);
Kostya Kortchinskyf1a54fd2017-04-21 18:10:53 +0000451 if (BypassQuarantine) {
452 Chunk->eraseHeader();
453 void *Ptr = Chunk->getAllocBeg(Header);
Kostya Kortchinsky2d944052017-07-24 15:29:38 +0000454 if (Header->FromPrimary) {
Kostya Kortchinskyb44364d2017-07-13 21:01:19 +0000455 ScudoThreadContext *ThreadContext = getThreadContextAndLock();
456 if (LIKELY(ThreadContext)) {
457 getBackendAllocator().deallocatePrimary(
458 getAllocatorCache(ThreadContext), Ptr);
459 ThreadContext->unlock();
460 } else {
461 SpinMutexLock Lock(&FallbackMutex);
462 getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr);
463 }
Kostya Kortchinskyf1a54fd2017-04-21 18:10:53 +0000464 } else {
Kostya Kortchinskyb44364d2017-07-13 21:01:19 +0000465 getBackendAllocator().deallocateSecondary(Ptr);
Kostya Kortchinskyf1a54fd2017-04-21 18:10:53 +0000466 }
467 } else {
      // If a small amount of memory was allocated with a larger alignment, we
      // want to take that into account. Otherwise the Quarantine would be
      // filled with tiny chunks, taking a lot of VA memory. This is an
      // approximation of the usable size, which allows us to not call
      // getActuallyAllocatedSize.
      uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, Header);
      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
      if (LIKELY(ThreadContext)) {
        AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
                                QuarantineCallback(
                                    getAllocatorCache(ThreadContext)),
                                Chunk, EstimatedSize);
        ThreadContext->unlock();
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, EstimatedSize);
      }
    }
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    initThreadMaybe();
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (UNLIKELY(!UserPtr))
      return;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         UserPtr);
        }
      }
    }
    uptr Size = Header.FromPrimary ? Header.SizeOrUnusedBytes :
        Chunk->getUsableSize(&Header) - Header.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       UserPtr);
      }
    }
    quarantineOrDeallocateChunk(Chunk, &Header, Size);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     OldPtr);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Chunk, &OldHeader, OldSize);
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;
    uptr UserBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size)))
      return FailureHandler::OnBadRequest();
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoThreadContext *ThreadContext) {
    AllocatorCache *Cache = getAllocatorCache(ThreadContext);
    AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
                              QuarantineCallback(Cache));
    BackendAllocator.destroyCache(Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    BackendAllocator.getStats(stats);
    return stats[StatType];
  }
};

static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

static void initScudoInternal(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void ScudoThreadContext::init() {
  getBackendAllocator().initCache(&Cache);
  Prng.init();
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoThreadContext::commitBack() {
  Instance.commitBack(this);
}

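// The scudo* functions below are the C-level entry points of the allocator.
// The public malloc/free/new/delete interceptors, defined separately in the
// runtime, are expected to forward to them roughly as follows (illustrative
// sketch only, not the actual interceptor code):
//   INTERCEPTOR(void *, malloc, uptr Size) { return scudoMalloc(Size, FromMalloc); }
//   INTERCEPTOR(void, free, void *Ptr) { scudoFree(Ptr, FromMalloc); }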
void *scudoMalloc(uptr Size, AllocType Type) {
  return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type));
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = errno_ENOMEM;
    return ScudoAllocator::FailureHandler::OnBadRequest();
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = errno_EINVAL;
    return ScudoAllocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    ScudoAllocator::FailureHandler::OnBadRequest();
    return errno_EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return errno_ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = errno_EINVAL;
    return ScudoAllocator::FailureHandler::OnBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}