//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_flags.h"
#include "scudo_tsd.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <errno.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static u32 Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason not to use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}
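
// Usage sketch (illustrative, not compiled): computeCRC32 folds the global
// Cookie, the chunk pointer, and the header words into a single checksum,
// e.g. assuming a 64-bit target where the packed header fits in one uptr:
//   uptr HeaderWord;
//   memcpy(&HeaderWord, &Header, sizeof(HeaderWord));
//   u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr), &HeaderWord, 1);
// Keeping the hardware/software dispatch inside this one helper means callers
// never need to know which CRC32 implementation was selected.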

static ScudoBackendAllocator &getBackendAllocator();

namespace Chunk {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with in
  // between. To prevent this, we work with a local copy of the header.
  static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                    AlignedChunkHeaderSize -
                                    (Header->Offset << MinAlignmentLog));
  }

  static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
    return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                  AlignedChunkHeaderSize);
  }
  static INLINE
  const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
    return reinterpret_cast<const AtomicPackedHeader *>(
        reinterpret_cast<uptr>(Ptr) - AlignedChunkHeaderSize);
  }

  static INLINE bool isAligned(const void *Ptr) {
    return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr Size = getBackendAllocator().getActuallyAllocatedSize(
        getBackendPtr(Ptr, Header), Header->ClassId);
    if (Size == 0)
      return 0;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Computes the checksum of the chunk pointer and its header.
  static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
                                 HeaderHolder, ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }
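
  // Note: only the low 16 bits of the CRC32 are kept, as the header's Checksum
  // bit-field is 16 bits wide. An illustrative verification round-trip
  // (mirroring isValid() and loadHeader() below):
  //   UnpackedHeader H;
  //   loadHeader(Ptr, &H);  // dies if the stored checksum is invalid
  //   CHECK_EQ(H.Checksum, computeChecksum(Ptr, &H));  // holds past that point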

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // terminate the process in the event of an invalid chunk.
  static INLINE bool isValid(const void *Ptr) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    UnpackedHeader NewUnpackedHeader =
        bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum ==
            computeChecksum(Ptr, &NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  static INLINE void eraseHeader(void *Ptr) {
    const PackedHeader NullPackedHeader = 0;
    atomic_store_relaxed(getAtomicHeader(Ptr), NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  static INLINE
  void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(Ptr, NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", Ptr);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  static INLINE void compareExchangeHeader(void *Ptr,
                                           UnpackedHeader *NewUnpackedHeader,
                                           UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    if (UNLIKELY(!atomic_compare_exchange_strong(
            getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
            memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", Ptr);
    }
  }
} // namespace Chunk
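
// Illustrative lifecycle sketch (comment only): header updates follow a
// load -> modify -> compare-exchange sequence so that a concurrent corruption
// or a double operation on the same chunk is caught instead of silently lost.
// Deallocation, for instance, conceptually does:
//   UnpackedHeader Old, New;
//   Chunk::loadHeader(Ptr, &Old);                    // checksum-verified read
//   New = Old;
//   New.State = ChunkQuarantine;
//   Chunk::compareExchangeHeader(Ptr, &New, &Old);   // dies on a lost race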

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(void *Ptr) {
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Ptr);
    }
    Chunk::eraseHeader(Ptr);
    void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
    if (Header.ClassId)
      getBackendAllocator().deallocatePrimary(Cache_, BackendPtr,
                                              Header.ClassId);
    else
      getBackendAllocator().deallocateSecondary(BackendPtr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  void *Allocate(uptr Size) {
    return getBackendAllocator().allocatePrimary(Cache_, BatchClassId);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().deallocatePrimary(Cache_, Ptr, BatchClassId);
  }

  AllocatorCache *Cache_;
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
  const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
};
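
// Note (hedged): a QuarantineBatch is sanitizer_common bookkeeping, an array
// of quarantined pointers a few KB in size, so the COMPILER_CHECK above
// guarantees it is always small enough to be serviced by the Primary through
// the BatchClassId size class.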

typedef Quarantine<QuarantineCallback, void> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
               sizeof(ScudoTSD::QuarantineCachePlaceHolder));

ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
  return reinterpret_cast<ScudoQuarantineCache *>(
      TSD->QuarantineCachePlaceHolder);
}

struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  typedef ReturnNullOrDieOnFailure FailureHandler;

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  u32 QuarantineChunksUpToSize;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  bool CheckRssLimit;
  uptr HardRssLimitMb;
  uptr SoftRssLimitMb;
  atomic_uint8_t RssLimitExceeded;
  atomic_uint64_t RssLastCheckedAtNS;

  explicit ScudoAllocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED) {}

  void performSanityChecks() {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    const uptr MaxPrimaryAlignment =
        1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
    const uptr MaxOffset =
        (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    const uptr LargestClassId = SizeClassMap::kLargestClassID;
    Header.ClassId = LargestClassId;
    if (Header.ClassId != LargestClassId) {
      dieWithMessage("ERROR: the largest class ID doesn't fit in the header\n");
    }
  }
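
  // All three checks above follow the same bit-field width pattern, sketched
  // here with a hypothetical layout (the real widths live in UnpackedHeader):
  // writing a value to a bit-field and reading it back only round-trips if the
  // field is wide enough, e.g.
  //   struct Example { u64 Offset : 16; };
  //   Example E;
  //   E.Offset = MaxOffset;         // silently truncated if wider than 16 bits
  //   if (E.Offset != MaxOffset) {} // truncation detected, header too small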

  void init() {
    SanitizerToolName = "Scudo";
    initFlags();

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform;
    // if so, opt for the hardware CRC32 version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    BackendAllocator.init(common_flags()->allocator_release_to_os_interval_ms);
    HardRssLimitMb = common_flags()->hard_rss_limit_mb;
    SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
    AllocatorQuarantine.Init(
        static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
        static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = getFlags()->QuarantineChunksUpToSize;
    DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
    DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
    ZeroContents = getFlags()->ZeroContents;

    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
                            /*blocking=*/false))) {
      Cookie = static_cast<u32>((NanoTime() >> 12) ^
                                (reinterpret_cast<uptr>(this) >> 4));
    }

    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
    if (CheckRssLimit)
      atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
  bool isValidPointer(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return false;
    if (!Chunk::isAligned(Ptr))
      return false;
    return Chunk::isValid(Ptr);
  }

  // Opportunistic RSS limit check. This will update the RSS limit status at
  // most once every 100ms; otherwise it just returns the current status.
  bool isRssLimitExceeded() {
    u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
    const u64 CurrentCheck = MonotonicNanoTime();
    if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
      return atomic_load_relaxed(&RssLimitExceeded);
    if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
                                      CurrentCheck, memory_order_relaxed))
      return atomic_load_relaxed(&RssLimitExceeded);
    // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
    //                RSS from /proc/self/statm by default. We might want to
    //                call getrusage directly, even if it's less accurate.
    const uptr CurrentRssMb = GetRSS() >> 20;
    if (HardRssLimitMb && HardRssLimitMb < CurrentRssMb) {
      Report("%s: hard RSS limit exhausted (%zdMb vs %zdMb)\n",
             SanitizerToolName, HardRssLimitMb, CurrentRssMb);
      DumpProcessMap();
      Die();
    }
    if (SoftRssLimitMb) {
      if (atomic_load_relaxed(&RssLimitExceeded)) {
        if (CurrentRssMb <= SoftRssLimitMb)
          atomic_store_relaxed(&RssLimitExceeded, false);
      } else {
        if (CurrentRssMb > SoftRssLimitMb) {
          atomic_store_relaxed(&RssLimitExceeded, true);
          Report("%s: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
                 SanitizerToolName, SoftRssLimitMb, CurrentRssMb);
        }
      }
    }
    return atomic_load_relaxed(&RssLimitExceeded);
  }
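
  // The check above is rate-limited: a single compare-exchange on the
  // timestamp elects at most one thread per 100ms window to pay for the
  // (comparatively expensive) /proc read, while every other caller returns the
  // cached flag. The pattern, as a minimal sketch under the same atomics:
  //   u64 Last = atomic_load_relaxed(&LastCheckNS);
  //   if (Now < Last + WindowNS ||
  //       !atomic_compare_exchange_weak(&LastCheckNS, &Last, Now,
  //                                     memory_order_relaxed))
  //     return CachedResult;        // another thread owns this refresh window
  //   CachedResult = expensiveCheck();  // only the winner gets here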

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(Alignment > MaxAlignment))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;
    if (UNLIKELY(Size >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Size == 0))
      Size = 1;

    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
    if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();

    if (CheckRssLimit && UNLIKELY(isRssLimitExceeded()))
      return FailureHandler::OnOOM();

    // Primary and Secondary backed allocations are treated differently. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    void *BackendPtr;
    uptr BackendSize;
    u8 ClassId;
    if (PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment)) {
      BackendSize = AlignedSize;
      ClassId = SizeClassMap::ClassID(BackendSize);
      ScudoTSD *TSD = getTSDAndLock();
      BackendPtr = BackendAllocator.allocatePrimary(&TSD->Cache, ClassId);
      TSD->unlock();
    } else {
      BackendSize = NeededSize;
      ClassId = 0;
      BackendPtr = BackendAllocator.allocateSecondary(BackendSize, Alignment);
    }
    if (UNLIKELY(!BackendPtr))
      return FailureHandler::OnOOM();

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && ClassId)
      memset(BackendPtr, 0,
             BackendAllocator.getActuallyAllocatedSize(BackendPtr, ClassId));

    UnpackedHeader Header = {};
    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + AlignedChunkHeaderSize;
    if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the offset
      // field of the header would be non-zero.
      DCHECK(ClassId);
      const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
      Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
      UserPtr = AlignedUserPtr;
    }
    CHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (ClassId) {
      Header.ClassId = ClassId;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the number of unused
      // bytes is the difference between the end of the user allocation and the
      // next page boundary.
      const uptr PageSize = GetPageSizeCached();
      const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    void *Ptr = reinterpret_cast<void *>(UserPtr);
    Chunk::storeHeader(Ptr, &Header);
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(Ptr, Size);
    return Ptr;
  }
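
  // Worked example of the Offset field (hypothetical values, assuming a
  // 16-byte aligned header and a MinAlignment of 16): for memalign(64, Size)
  // served by the Primary, UserPtr starts at BackendPtr + 16 and is rounded up
  // to the next multiple of 64. If the rounding skipped 32 bytes,
  // Header.Offset stores 32 >> MinAlignmentLog = 2, which is exactly what
  // Chunk::getBackendPtr() needs to walk back to BackendPtr on deallocation.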

  // Place a chunk in the quarantine or directly deallocate it in the event of
  // a zero-sized quarantine, or if the size of the chunk is greater than the
  // quarantine chunk size threshold.
  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
                                   uptr Size) {
    const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) ||
        (Size > QuarantineChunksUpToSize);
    if (BypassQuarantine) {
      Chunk::eraseHeader(Ptr);
      void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
      if (Header->ClassId) {
        ScudoTSD *TSD = getTSDAndLock();
        getBackendAllocator().deallocatePrimary(&TSD->Cache, BackendPtr,
                                                Header->ClassId);
        TSD->unlock();
      } else {
        getBackendAllocator().deallocateSecondary(BackendPtr);
      }
    } else {
      // If a small amount of memory was allocated with a larger alignment, we
      // want to take that into account. Otherwise the Quarantine would be
      // filled with tiny chunks, taking a lot of VA memory. This is an
      // approximation of the usable size that allows us to avoid calling
      // getActuallyAllocatedSize.
      uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      ScudoTSD *TSD = getTSDAndLock();
      AllocatorQuarantine.Put(getQuarantineCache(TSD),
                              QuarantineCallback(&TSD->Cache), Ptr,
                              EstimatedSize);
      TSD->unlock();
    }
  }
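
  // Numeric illustration of EstimatedSize (hypothetical values): a 16-byte
  // allocation requested with page alignment can carry an Offset worth several
  // KB of backend memory. Charging the quarantine for
  // Size + (Offset << MinAlignmentLog) instead of just Size keeps its byte
  // budget honest without paying for a getActuallyAllocatedSize() call.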

  // Deallocates a Chunk, which means either adding it to the quarantine or
  // directly returning it to the backend if criteria are met.
  void deallocate(void *Ptr, uptr DeleteSize, AllocType Type) {
    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, ending up with initialized thread-specific data that
    // is never destroyed properly. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(Ptr);
    if (UNLIKELY(!Ptr))
      return;
    if (UNLIKELY(!Chunk::isAligned(Ptr))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", Ptr);
    }
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", Ptr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch when deallocating "
                         "address %p\n", Ptr);
        }
      }
    }
    uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
        Chunk::getUsableSize(Ptr, &Header) - Header.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Ptr);
      }
    }
    quarantineOrDeallocateChunk(Ptr, &Header, Size);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    if (UNLIKELY(!Chunk::isAligned(OldPtr))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    UnpackedHeader OldHeader;
    Chunk::loadHeader(OldPtr, &OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (DeallocationTypeMismatch) {
      if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
        dieWithMessage("ERROR: allocation type mismatch when reallocating "
                       "address %p\n", OldPtr);
      }
    }
    const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
      Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }
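
  // Illustration of the in-place criterion above (hypothetical numbers): with
  // a kMaxSize of 64KB, shrinking a chunk of 40KB usable size to 20KB stays in
  // place (a 20KB difference is below the 32KB threshold) and only rewrites
  // SizeOrUnusedBytes; shrinking it to 1KB (a 39KB difference) is deemed
  // wasteful enough to warrant a fresh allocation, a copy, and the
  // quarantining of the old chunk.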

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk::getUsableSize(Ptr, &Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size)))
      return FailureHandler::OnBadRequest();
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }
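
  // The overflow check matters because NMemB * Size wraps modulo 2^N: on a
  // 32-bit target, calloc(0x20000000, 8) would otherwise compute a product of
  // 0 and hand back a tiny zeroed chunk for a 4GB request. (Illustrative
  // values; CheckForCallocOverflow comes from sanitizer_allocator_checks.h.)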

  void commitBack(ScudoTSD *TSD) {
    AllocatorQuarantine.Drain(getQuarantineCache(TSD),
                              QuarantineCallback(&TSD->Cache));
    BackendAllocator.destroyCache(&TSD->Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    BackendAllocator.getStats(stats);
    return stats[StatType];
  }

  void *handleBadRequest() {
    initThreadMaybe();
    return FailureHandler::OnBadRequest();
  }

  void setRssLimit(uptr LimitMb, bool HardLimit) {
    if (HardLimit)
      HardRssLimitMb = LimitMb;
    else
      SoftRssLimitMb = LimitMb;
    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
  }
};

static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

void initScudo() {
  Instance.init();
}

void ScudoTSD::init(bool Shared) {
  UnlockRequired = Shared;
  getBackendAllocator().initCache(&Cache);
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoTSD::commitBack() {
  Instance.commitBack(this);
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type));
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = ENOMEM;
    return Instance.handleBadRequest();
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = EINVAL;
    return Instance.handleBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    Instance.handleBadRequest();
    return EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = EINVAL;
    return Instance.handleBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

} // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
  return Size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

// Interface functions

extern "C" {
void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
  if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
    return;
  Instance.setRssLimit(LimitMb, !!HardLimit);
}
} // extern "C"
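
// Usage sketch for a hypothetical embedder: a soft limit makes allocations
// start failing through OnOOM() while the process keeps running; a hard limit
// dumps the process map and aborts. For example:
//   __scudo_set_rss_limit(2048, /*HardLimit=*/0);  // fail allocs above 2GB RSS
//   __scudo_set_rss_limit(4096, /*HardLimit=*/1);  // die above 4GB RSS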