//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_tls.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <errno.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static uptr Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(uptr Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason not to use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

static ScudoBackendAllocator &getBackendAllocator();

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with. To
  // prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    uptr Size =
        getBackendAllocator().GetActuallyAllocatedSize(getAllocBeg(Header),
                                                       Header->FromPrimary);
    if (Size == 0)
      return 0;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the Chunk pointer and its ChunkHeader.
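  // The CRC covers the chunk address and every header field except the
  // checksum itself (zeroed out for the computation), and is keyed with the
  // global Cookie, so a header cannot be forged or relocated without knowing
  // that secret value.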
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HeaderHolder,
                           ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // terminate the process in the event of an invalid chunk.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  void eraseHeader() {
    PackedHeader NullPackedHeader = 0;
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader,
                                                 &OldPackedHeader,
                                                 NewPackedHeader,
                                                 memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

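// Returns the chunk corresponding to a user pointer: the chunk header is
// stored in the AlignedChunkHeaderSize bytes immediately preceding UserBeg.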
ScudoChunk *getScudoChunk(uptr UserBeg) {
  return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
}

struct AllocatorOptions {
  u32 QuarantineSizeMb;
  u32 ThreadLocalQuarantineSizeKb;
  bool MayReturnNull;
  s32 ReleaseToOSIntervalMs;
  bool DeallocationTypeMismatch;
  bool DeleteSizeMismatch;
  bool ZeroContents;

  void setFrom(const Flags *f, const CommonFlags *cf);
  void copyTo(Flags *f, CommonFlags *cf) const;
};

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

static void initScudoInternal(const AllocatorOptions &Options);

static bool ScudoInitIsRunning = false;

void initScudo() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if hardware CRC32 is supported in the binary and by the platform;
  // if so, opt for the hardware version of the CRC32 checksum.
  if (computeHardwareCRC32 && testCPUFeature(CRC32CPUFeature))
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initScudoInternal(Options);

  // TODO(kostyak): determine if MaybeStartBackgroudThread could be of some use.

  ScudoInitIsRunning = false;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function: returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    Chunk->eraseHeader();
    void *Ptr = Chunk->getAllocBeg(&Header);
    getBackendAllocator().Deallocate(Cache_, Ptr, Header.FromPrimary);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
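  // Note: the 'true' passed to the backend below is the FromPrimary flag,
  // since quarantine batches are small enough to be serviced by the Primary.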
  void *Allocate(uptr Size) {
    return getBackendAllocator().Allocate(Cache_, Size, MinAlignment, true);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().Deallocate(Cache_, Ptr, true);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
               sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));

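// Per-thread accessors for the allocator cache, quarantine cache, and PRNG
// held in a thread's ScudoThreadContext.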
AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Cache;
}

ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
  return reinterpret_cast<
      ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
}

Xorshift128Plus *getPrng(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Prng;
}

struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  typedef ReturnNullOrDieOnFailure FailureHandler;

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they can
  // be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  ScudoQuarantineCache FallbackQuarantineCache;
  Xorshift128Plus FallbackPrng;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit ScudoAllocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
        SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
        MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
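    // As an illustration (with made-up but representative values): if
    // SizeClassMap::kMaxSize were 1 << 17 and MinAlignment 16 (so
    // MinAlignmentLog 4) with a 16-byte AlignedChunkHeaderSize, then
    // MaxPrimaryAlignment would be 1 << 16 and MaxOffset would be
    // (65536 - 16) >> 4 = 4095, which has to fit in the Offset bitfield.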
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    SetAllocatorMayReturnNull(Options.MayReturnNull);
    BackendAllocator.Init(Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    FallbackPrng.initFromURandom();
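    // The Cookie keys every chunk checksum; drawing it from the
    // urandom-seeded PRNG keeps header values unpredictable across runs.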
    Cookie = FallbackPrng.getNext();
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
  bool isValidPointer(const void *UserPtr) {
    initThreadMaybe();
    if (UNLIKELY(!UserPtr))
      return false;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(UserBeg, MinAlignment))
      return false;
    return getScudoChunk(UserBeg)->isValid();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(Alignment > MaxAlignment))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;
    if (UNLIKELY(Size >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Size == 0))
      Size = 1;

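    // NeededSize covers the rounded-up user size plus the header; when a
    // larger-than-minimum alignment is requested, AlignedSize additionally
    // reserves the worst-case padding needed to realign the user pointer
    // within a Primary-backed allocation.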
    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
    if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);

    void *Ptr;
    uptr Salt;
    uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize;
    uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
    ScudoThreadContext *ThreadContext = getThreadContextAndLock();
    if (LIKELY(ThreadContext)) {
      Salt = getPrng(ThreadContext)->getNext();
      Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
                                      AllocationSize, AllocationAlignment,
                                      FromPrimary);
      ThreadContext->unlock();
    } else {
      SpinMutexLock l(&FallbackMutex);
      Salt = FallbackPrng.getNext();
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize,
                                      AllocationAlignment, FromPrimary);
    }
    if (UNLIKELY(!Ptr))
      return FailureHandler::OnOOM();

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && FromPrimary)
      memset(Ptr, 0,
             BackendAllocator.GetActuallyAllocatedSize(Ptr, FromPrimary));

    UnpackedHeader Header = {};
    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
    if (UNLIKELY(!IsAligned(UserBeg, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the offset
      // field of the header would be non-zero.
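      // For example (illustrative values, assuming a 16-byte MinAlignment and
      // header): with AllocBeg at 0x1000 and a requested alignment of 64,
      // UserBeg starts at 0x1010, gets rounded up to 0x1040, and the stored
      // Offset becomes (0x1040 - 0x10 - 0x1000) >> 4 = 3.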
      CHECK(FromPrimary);
      UserBeg = RoundUpTo(UserBeg, Alignment);
      uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
      Header.Offset = Offset >> MinAlignmentLog;
    }
    CHECK_LE(UserBeg + Size, AllocBeg + AllocationSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
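    // SizeOrUnusedBytes holds the requested size for Primary-backed chunks,
    // and the number of bytes left unused up to the next page boundary for
    // Secondary-backed ones; deallocate() and reallocate() recover the size
    // accordingly.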
    if (FromPrimary) {
      Header.FromPrimary = FromPrimary;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and the
      // next page boundary.
      uptr PageSize = GetPageSizeCached();
      uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    Header.Salt = static_cast<u8>(Salt);
    getScudoChunk(UserBeg)->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(UserBeg);
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
  // we directly deallocate the chunk, otherwise the flow would lead to the
  // chunk being loaded (and checked) twice, and stored (and checksummed) once,
  // with no additional security value.
  void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                   uptr Size) {
    bool FromPrimary = Header->FromPrimary;
    bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
    if (BypassQuarantine) {
      Chunk->eraseHeader();
      void *Ptr = Chunk->getAllocBeg(Header);
      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
      if (LIKELY(ThreadContext)) {
        getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr,
                                         FromPrimary);
        ThreadContext->unlock();
      } else {
        SpinMutexLock Lock(&FallbackMutex);
        getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr,
                                         FromPrimary);
      }
    } else {
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, Header);
      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
      if (LIKELY(ThreadContext)) {
        AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
                                QuarantineCallback(
                                    getAllocatorCache(ThreadContext)),
                                Chunk, Size);
        ThreadContext->unlock();
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, Size);
      }
    }
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    initThreadMaybe();
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (UNLIKELY(!UserPtr))
      return;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (OldHeader.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (OldHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         UserPtr);
        }
      }
    }
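    // Recover the requested size: it is stored directly for Primary chunks,
    // and derived from the usable size and the unused trailing bytes for
    // Secondary ones.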
    uptr Size = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
        Chunk->getUsableSize(&OldHeader) - OldHeader.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       UserPtr);
      }
    }

    // If a small memory amount was allocated with a larger alignment, we want
    // to take that into account. Otherwise the Quarantine would be filled with
    // tiny chunks, taking a lot of VA memory. This is an approximation of the
    // usable size, that allows us to not call GetActuallyAllocatedSize.
    uptr LiableSize = Size + (OldHeader.Offset << MinAlignmentLog);
    quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     OldPtr);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
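    // "Reasonable" meaning that shrinking in place would leave less than half
    // of the largest Primary class size unused.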
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize);
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;
    uptr UserBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (CheckForCallocOverflow(NMemB, Size))
      return FailureHandler::OnBadRequest();
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoThreadContext *ThreadContext) {
    AllocatorCache *Cache = getAllocatorCache(ThreadContext);
    AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
                              QuarantineCallback(Cache));
    BackendAllocator.DestroyCache(Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    BackendAllocator.GetStats(stats);
    return stats[StatType];
  }
};

static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

static void initScudoInternal(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void ScudoThreadContext::init() {
  getBackendAllocator().InitCache(&Cache);
  Prng.initFromURandom();
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoThreadContext::commitBack() {
  Instance.commitBack(this);
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  if (UNLIKELY(!IsPowerOfTwo(Alignment)))
    return ScudoAllocator::FailureHandler::OnBadRequest();
  return Instance.allocate(Size, Alignment, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Alignment % sizeof(void *)) != 0)) {
    *MemPtr = ScudoAllocator::FailureHandler::OnBadRequest();
    return EINVAL;
  }
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  if (!*MemPtr)
    return ENOMEM;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // Alignment must be a power of 2, Size must be a multiple of Alignment.
  if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Size & (Alignment - 1)) != 0))
    return ScudoAllocator::FailureHandler::OnBadRequest();
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}
Kostya Serebryany712fc982016-06-07 01:20:26 +0000723}