//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_flags.h"
#include "scudo_tsd.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <errno.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static u32 Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

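// Helper routine that computes a CRC32 checksum of Value and the ArraySize
// elements of Array, seeded with Crc, using the hardware instructions when
// they are available and the software implementation otherwise.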
INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason to not use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

static ScudoBackendAllocator &getBackendAllocator();

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered with.
  // To prevent this, we work with a local copy of the header.
  void *getBackendPtr(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    const uptr Size =
        getBackendAllocator().getActuallyAllocatedSize(getBackendPtr(Header),
                                                       Header->ClassId);
    if (Size == 0)
      return 0;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the Chunk pointer and its ChunkHeader.
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HeaderHolder,
                           ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // terminate the process in the event of an invalid chunk.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  void eraseHeader() {
    PackedHeader NullPackedHeader = 0;
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
        computeChecksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader,
                                                 &OldPackedHeader,
                                                 NewPackedHeader,
                                                 memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

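// Returns the chunk corresponding to a user pointer, that is, the address of
// the header located AlignedChunkHeaderSize bytes before UserBeg.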
ScudoChunk *getScudoChunk(uptr UserBeg) {
  return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
}

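// Callbacks used by the Quarantine to recycle quarantined chunks back to the
// backend, and to allocate and deallocate its internal batches from the
// Primary.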
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    Chunk->eraseHeader();
    void *Ptr = Chunk->getBackendPtr(&Header);
    if (Header.ClassId)
      getBackendAllocator().deallocatePrimary(Cache_, Ptr, Header.ClassId);
    else
      getBackendAllocator().deallocateSecondary(Ptr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  void *Allocate(uptr Size) {
    return getBackendAllocator().allocatePrimary(Cache_, BatchClassId);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().deallocatePrimary(Cache_, Ptr, BatchClassId);
  }

  AllocatorCache *Cache_;
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
  const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
               sizeof(ScudoTSD::QuarantineCachePlaceHolder));

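// Returns the quarantine cache embedded in a thread's TSD placeholder buffer.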
ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
  return reinterpret_cast<ScudoQuarantineCache *>(
      TSD->QuarantineCachePlaceHolder);
}

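// The allocator frontend: it ties the backend (Primary & Secondary), the
// quarantine and the runtime options together, and implements the allocation,
// deallocation and reallocation logic.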
struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  typedef ReturnNullOrDieOnFailure FailureHandler;

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  u32 QuarantineChunksUpToSize;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  bool CheckRssLimit;
  uptr HardRssLimitMb;
  uptr SoftRssLimitMb;
  atomic_uint8_t RssLimitExceeded;
  atomic_uint64_t RssLastCheckedAtNS;

  explicit ScudoAllocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED) {}

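  // Verifies at initialization time that the packed header fields are wide
  // enough to hold their worst case values, and dies if they are not.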
  void performSanityChecks() {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    const uptr MaxPrimaryAlignment =
        1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
    const uptr MaxOffset =
        (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    const uptr LargestClassId = SizeClassMap::kLargestClassID;
    Header.ClassId = LargestClassId;
    if (Header.ClassId != LargestClassId) {
      dieWithMessage("ERROR: the largest class ID doesn't fit in the header\n");
    }
  }

  void init() {
    SanitizerToolName = "Scudo";
    initFlags();

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform,
    // if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    BackendAllocator.init(common_flags()->allocator_release_to_os_interval_ms);
    HardRssLimitMb = common_flags()->hard_rss_limit_mb;
    SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
    AllocatorQuarantine.Init(
        static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
        static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = getFlags()->QuarantineChunksUpToSize;
    DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
    DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
    ZeroContents = getFlags()->ZeroContents;

    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
                            /*blocking=*/false))) {
      Cookie = static_cast<u32>((NanoTime() >> 12) ^
                                (reinterpret_cast<uptr>(this) >> 4));
    }

    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
    if (CheckRssLimit)
      atomic_store_relaxed(&RssLastCheckedAtNS, NanoTime());
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't one.
  bool isValidPointer(const void *UserPtr) {
    initThreadMaybe();
    if (UNLIKELY(!UserPtr))
      return false;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(UserBeg, MinAlignment))
      return false;
    return getScudoChunk(UserBeg)->isValid();
  }

  // Opportunistic RSS limit check. This will update the RSS limit status, if
  // it can, every 100ms, otherwise it will just return the current one.
  bool isRssLimitExceeded() {
    u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
    const u64 CurrentCheck = NanoTime();
    if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
      return atomic_load_relaxed(&RssLimitExceeded);
    if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
                                      CurrentCheck, memory_order_relaxed))
      return atomic_load_relaxed(&RssLimitExceeded);
    // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
    //                RSS from /proc/self/statm by default. We might want to
    //                call getrusage directly, even if it's less accurate.
    const uptr CurrentRssMb = GetRSS() >> 20;
    if (HardRssLimitMb && HardRssLimitMb < CurrentRssMb) {
      Report("%s: hard RSS limit exhausted (%zdMb vs %zdMb)\n",
             SanitizerToolName, HardRssLimitMb, CurrentRssMb);
      DumpProcessMap();
      Die();
    }
    if (SoftRssLimitMb) {
      if (atomic_load_relaxed(&RssLimitExceeded)) {
        if (CurrentRssMb <= SoftRssLimitMb)
          atomic_store_relaxed(&RssLimitExceeded, false);
      } else {
        if (CurrentRssMb > SoftRssLimitMb) {
          atomic_store_relaxed(&RssLimitExceeded, true);
          Report("%s: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
                 SanitizerToolName, SoftRssLimitMb, CurrentRssMb);
        }
      }
    }
    return atomic_load_relaxed(&RssLimitExceeded);
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(Alignment > MaxAlignment))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;
    if (UNLIKELY(Size >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Size == 0))
      Size = 1;

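    // Reserve room for the header in front of the user data and, for
    // alignments greater than the minimum, enough extra space so that the
    // user pointer can be rounded up to the requested alignment.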
    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
    if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();

    if (CheckRssLimit && UNLIKELY(isRssLimitExceeded()))
      return FailureHandler::OnOOM();

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    const bool FromPrimary =
        PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);

    void *Ptr;
    u8 ClassId;
    uptr AllocSize;
    if (FromPrimary) {
      AllocSize = AlignedSize;
      ClassId = SizeClassMap::ClassID(AllocSize);
      ScudoTSD *TSD = getTSDAndLock();
      Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, ClassId);
      TSD->unlock();
    } else {
      AllocSize = NeededSize;
      ClassId = 0;
      Ptr = BackendAllocator.allocateSecondary(AllocSize, Alignment);
    }
    if (UNLIKELY(!Ptr))
      return FailureHandler::OnOOM();

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && FromPrimary)
      memset(Ptr, 0, BackendAllocator.getActuallyAllocatedSize(Ptr, ClassId));

    UnpackedHeader Header = {};
    uptr BackendPtr = reinterpret_cast<uptr>(Ptr);
    uptr UserBeg = BackendPtr + AlignedChunkHeaderSize;
    if (UNLIKELY(!IsAligned(UserBeg, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the offset
      // field of the header would be non-zero.
      CHECK(FromPrimary);
      UserBeg = RoundUpTo(UserBeg, Alignment);
      uptr Offset = UserBeg - AlignedChunkHeaderSize - BackendPtr;
      Header.Offset = Offset >> MinAlignmentLog;
    }
    CHECK_LE(UserBeg + Size, BackendPtr + AllocSize);
    Header.ClassId = ClassId;
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (FromPrimary) {
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and the
      // next page boundary.
      uptr PageSize = GetPageSizeCached();
      uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    getScudoChunk(UserBeg)->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(UserBeg);
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Place a chunk in the quarantine or directly deallocate it in the event of
  // a zero-sized quarantine, or if the size of the chunk is greater than the
  // quarantine chunk size threshold.
  void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                   uptr Size) {
    const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) ||
        (Size > QuarantineChunksUpToSize);
    if (BypassQuarantine) {
      Chunk->eraseHeader();
      void *Ptr = Chunk->getBackendPtr(Header);
      if (Header->ClassId) {
        ScudoTSD *TSD = getTSDAndLock();
        getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr,
                                                Header->ClassId);
        TSD->unlock();
      } else {
        getBackendAllocator().deallocateSecondary(Ptr);
      }
    } else {
      // If a small memory amount was allocated with a larger alignment, we want
      // to take that into account. Otherwise the Quarantine would be filled
      // with tiny chunks, taking a lot of VA memory. This is an approximation
      // of the usable size, that allows us to not call
      // GetActuallyAllocatedSize.
      uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, Header);
      ScudoTSD *TSD = getTSDAndLock();
      AllocatorQuarantine.Put(getQuarantineCache(TSD),
                              QuarantineCallback(&TSD->Cache),
                              Chunk, EstimatedSize);
      TSD->unlock();
    }
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, ending up in initialized thread specific data never
    // being destroyed properly. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (UNLIKELY(!UserPtr))
      return;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch when deallocating "
                         "address %p\n", UserPtr);
        }
      }
    }
    uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
        Chunk->getUsableSize(&Header) - Header.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       UserPtr);
      }
    }
    quarantineOrDeallocateChunk(Chunk, &Header, Size);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (DeallocationTypeMismatch) {
      if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
        dieWithMessage("ERROR: allocation type mismatch when reallocating "
                       "address %p\n", OldPtr);
      }
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Chunk, &OldHeader, OldSize);
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;
    uptr UserBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

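  // calloc: checks the NMemB * Size multiplication for overflow, and requests
  // zeroed out memory from allocate().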
  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size)))
      return FailureHandler::OnBadRequest();
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

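  // Drains a TSD's quarantine cache and destroys its backend allocator cache.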
  void commitBack(ScudoTSD *TSD) {
    AllocatorQuarantine.Drain(getQuarantineCache(TSD),
                              QuarantineCallback(&TSD->Cache));
    BackendAllocator.destroyCache(&TSD->Cache);
  }

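  // Returns the aggregated backend allocator statistic identified by StatType.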
  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    BackendAllocator.getStats(stats);
    return stats[StatType];
  }

  void *handleBadRequest() {
    initThreadMaybe();
    return FailureHandler::OnBadRequest();
  }
};

static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

void initScudo() {
  Instance.init();
}

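// Sets up a thread's TSD: initializes its backend allocator cache and clears
// the quarantine cache placeholder.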
void ScudoTSD::init(bool Shared) {
  UnlockRequired = Shared;
  getBackendAllocator().initCache(&Cache);
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoTSD::commitBack() {
  Instance.commitBack(this);
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type));
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

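// realloc semantics: a null pointer behaves like malloc, and a zero size
// deallocates the pointer and returns nullptr.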
void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = ENOMEM;
    return Instance.handleBadRequest();
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = EINVAL;
    return Instance.handleBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    Instance.handleBadRequest();
    return EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = EINVAL;
    return Instance.handleBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

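// Free and unmapped bytes are not tracked; the next two functions return
// placeholder values.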
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}
Kostya Serebryany712fc982016-06-07 01:20:26 +0000731}