//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_tls.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static uptr Cookie;
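// (The cookie is used as the initial CRC value in computeChecksum below, so
// header checksums depend on a runtime-random secret in addition to the chunk
// address; see init, where the cookie is drawn from the fallback PRNG.)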

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(uptr Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason not to use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}
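
// Usage note: within this file, computeCRC32 is only called by computeChecksum
// below. When the binary isn't compiled with hardware CRC32 support, the
// hardware/software decision is made at runtime through HashAlgorithm, which
// initScudo sets after probing the CPU.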

static ScudoBackendAllocator &getBackendAllocator();

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered with.
  // To prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    uptr Size =
        getBackendAllocator().GetActuallyAllocatedSize(getAllocBeg(Header),
                                                       Header->FromPrimary);
    if (Size == 0)
      return 0;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the Chunk pointer and its ChunkHeader.
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HeaderHolder,
                           ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }
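
  // Note: the 32-bit CRC is deliberately truncated to 16 bits above, as the
  // Checksum bitfield of the packed header is only 16 bits wide. The checksum
  // is computed over the chunk address plus every header field with Checksum
  // zeroed, so both relocation and tampering are detected.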

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // incur termination in the event of an invalid chunk.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  void eraseHeader() {
    PackedHeader NullPackedHeader = 0;
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader,
                                                 &OldPackedHeader,
                                                 NewPackedHeader,
                                                 memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

ScudoChunk *getScudoChunk(uptr UserBeg) {
  return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
}
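
// Layout assumed by getScudoChunk and getAllocBeg (Offset is stored in
// multiples of MinAlignment, and is 0 unless the Primary had to round the
// user pointer up to a larger alignment):
//   AllocBeg | padding (Offset << MinAlignmentLog bytes) | header | UserBeg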

struct AllocatorOptions {
  u32 QuarantineSizeMb;
  u32 ThreadLocalQuarantineSizeKb;
  bool MayReturnNull;
  s32 ReleaseToOSIntervalMs;
  bool DeallocationTypeMismatch;
  bool DeleteSizeMismatch;
  bool ZeroContents;

  void setFrom(const Flags *f, const CommonFlags *cf);
  void copyTo(Flags *f, CommonFlags *cf) const;
};

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

static void initScudoInternal(const AllocatorOptions &Options);

static bool ScudoInitIsRunning = false;

void initScudo() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if hardware CRC32 is supported in the binary and by the platform, if
  // so, opt for the CRC32 hardware version of the checksum.
  if (computeHardwareCRC32 && testCPUFeature(CRC32CPUFeature))
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initScudoInternal(Options);

  // TODO(kostyak): determine if MaybeStartBackgroudThread could be of some use.

  ScudoInitIsRunning = false;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    Chunk->eraseHeader();
    void *Ptr = Chunk->getAllocBeg(&Header);
    getBackendAllocator().Deallocate(Cache_, Ptr, Header.FromPrimary);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
  void *Allocate(uptr Size) {
    return getBackendAllocator().Allocate(Cache_, Size, MinAlignment, true);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().Deallocate(Cache_, Ptr, true);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
               sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));

AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Cache;
}

ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
  return reinterpret_cast<
      ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
}

Xorshift128Plus *getPrng(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Prng;
}

struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  typedef ReturnNullOrDieOnFailure FailureHandler;

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they can
  // be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  ScudoQuarantineCache FallbackQuarantineCache;
  Xorshift128Plus FallbackPrng;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit ScudoAllocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
        SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
        MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    SetAllocatorMayReturnNull(Options.MayReturnNull);
    BackendAllocator.Init(Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    FallbackPrng.initFromURandom();
    Cookie = FallbackPrng.getNext();
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't valid.
  bool isValidPointer(const void *UserPtr) {
    initThreadMaybe();
    if (!UserPtr)
      return false;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(UserBeg, MinAlignment))
      return false;
    return getScudoChunk(UserBeg)->isValid();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
      dieWithMessage("ERROR: alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return FailureHandler::OnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size >= MaxAllowedMallocSize)
      return FailureHandler::OnBadRequest();
    if (Size == 0)
      Size = 1;

    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
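    // A sketch of the intent, under the assumption that the Primary services
    // the allocation: UserBeg is rounded up to Alignment further down, and the
    // extra (Alignment - AlignedChunkHeaderSize) bytes reserve room for that
    // rounding; the Secondary takes care of its own alignment instead.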
    if (AlignedSize >= MaxAllowedMallocSize)
      return FailureHandler::OnBadRequest();

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);

    void *Ptr;
    uptr Salt;
    uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize;
    uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
    ScudoThreadContext *ThreadContext = getThreadContextAndLock();
    if (LIKELY(ThreadContext)) {
      Salt = getPrng(ThreadContext)->getNext();
      Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
                                      AllocationSize, AllocationAlignment,
                                      FromPrimary);
      ThreadContext->unlock();
    } else {
      SpinMutexLock l(&FallbackMutex);
      Salt = FallbackPrng.getNext();
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize,
                                      AllocationAlignment, FromPrimary);
    }
    if (!Ptr)
      return FailureHandler::OnOOM();

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && FromPrimary)
      memset(Ptr, 0,
             BackendAllocator.GetActuallyAllocatedSize(Ptr, FromPrimary));

    UnpackedHeader Header = {};
    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
    if (!IsAligned(UserBeg, Alignment)) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the offset
      // field of the header would be non-zero.
      CHECK(FromPrimary);
      UserBeg = RoundUpTo(UserBeg, Alignment);
      uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
      Header.Offset = Offset >> MinAlignmentLog;
    }
    CHECK_LE(UserBeg + Size, AllocBeg + AllocationSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (FromPrimary) {
      Header.FromPrimary = FromPrimary;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and the
      // next page boundary.
      uptr PageSize = GetPageSizeCached();
      uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
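    // Dual encoding: for Primary backed chunks, SizeOrUnusedBytes holds the
    // requested size; for Secondary backed chunks, it holds the unused
    // trailing bytes, and the size is recovered later as the usable size
    // minus SizeOrUnusedBytes (see deallocate and reallocate).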
    Header.Salt = static_cast<u8>(Salt);
    getScudoChunk(UserBeg)->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(UserBeg);
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
  // we directly deallocate the chunk, otherwise the flow would lead to the
  // chunk being loaded (and checked) twice, and stored (and checksummed) once,
  // with no additional security value.
  void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                   uptr Size) {
    bool FromPrimary = Header->FromPrimary;
    bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
    if (BypassQuarantine) {
      Chunk->eraseHeader();
      void *Ptr = Chunk->getAllocBeg(Header);
      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
      if (LIKELY(ThreadContext)) {
        getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr,
                                         FromPrimary);
        ThreadContext->unlock();
      } else {
        SpinMutexLock Lock(&FallbackMutex);
        getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr,
                                         FromPrimary);
      }
    } else {
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, Header);
      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
      if (LIKELY(ThreadContext)) {
        AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
                                QuarantineCallback(
                                    getAllocatorCache(ThreadContext)),
                                Chunk, Size);
        ThreadContext->unlock();
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, Size);
      }
    }
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    initThreadMaybe();
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (OldHeader.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (OldHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         UserPtr);
        }
      }
    }
    uptr Size = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
        Chunk->getUsableSize(&OldHeader) - OldHeader.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       UserPtr);
      }
    }

    // If a small memory amount was allocated with a larger alignment, we want
    // to take that into account. Otherwise the Quarantine would be filled with
    // tiny chunks, taking a lot of VA memory. This is an approximation of the
    // usable size, that allows us to not call GetActuallyAllocatedSize.
    uptr LiableSize = Size + (OldHeader.Offset << MinAlignmentLog);
    quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     OldPtr);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize);
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (!Ptr)
      return 0;
    uptr UserBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB)  // Overflow check
      return FailureHandler::OnBadRequest();
    return allocate(Total, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoThreadContext *ThreadContext) {
    AllocatorCache *Cache = getAllocatorCache(ThreadContext);
    AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
                              QuarantineCallback(Cache));
    BackendAllocator.DestroyCache(Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    BackendAllocator.GetStats(stats);
    return stats[StatType];
  }
};

static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

static void initScudoInternal(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void ScudoThreadContext::init() {
  getBackendAllocator().InitCache(&Cache);
  Prng.initFromURandom();
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoThreadContext::commitBack() {
  Instance.commitBack(this);
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
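  // Note: this always reports success. If the failure handler returns null
  // instead of aborting (allocator_may_return_null), *MemPtr is simply null.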
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // Size must be a multiple of the alignment. To avoid a division, we first
  // make sure that Alignment is a power of 2.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

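// For reference, a sketch of how the C interface is expected to map onto
// these entry points; the actual wiring lives in the interceptors, outside
// of this file:
//   void *malloc(size_t Size) { return scudoMalloc(Size, FromMalloc); }
//   void free(void *Ptr) { scudoFree(Ptr, FromMalloc); }
//   void *realloc(void *Ptr, uptr Size) { return scudoRealloc(Ptr, Size); }
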
}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}
Kostya Serebryany712fc982016-06-07 01:20:26 +0000721}