//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_flags.h"
#include "scudo_interface_internal.h"
#include "scudo_tsd.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <errno.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static u32 Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for scudo_crc32.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a
  // result there is no reason to not use it here.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  Crc = CRC32_INTRINSIC(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = CRC32_INTRINSIC(Crc, Array[i]);
  return Crc;
#else
  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
    Crc = computeHardwareCRC32(Crc, Value);
    for (uptr i = 0; i < ArraySize; i++)
      Crc = computeHardwareCRC32(Crc, Array[i]);
    return Crc;
  }
  Crc = computeSoftwareCRC32(Crc, Value);
  for (uptr i = 0; i < ArraySize; i++)
    Crc = computeSoftwareCRC32(Crc, Array[i]);
  return Crc;
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

static ScudoBackendAllocator &getBackendAllocator();

namespace Chunk {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with. To
  // prevent this, we work with a local copy of the header.
  static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
                                    AlignedChunkHeaderSize -
                                    (Header->Offset << MinAlignmentLog));
  }
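
  // For illustration (widths not to scale), a chunk in memory looks like
  // this; Offset is zero except for over-aligned Primary allocations:
  //
  //   BackendPtr                                       Ptr (user pointer)
  //   |                                                |
  //   v                                                v
  //   +----------------------------+------------------+------------------+
  //   | Offset << MinAlignmentLog  |      header      |    user data     |
  //   +----------------------------+------------------+------------------+
  //                                 (AlignedChunkHeaderSize bytes)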

  static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
    return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                  AlignedChunkHeaderSize);
  }
  static INLINE
  const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
    return reinterpret_cast<const AtomicPackedHeader *>(
        reinterpret_cast<uptr>(Ptr) - AlignedChunkHeaderSize);
  }

  static INLINE bool isAligned(const void *Ptr) {
    return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
  }

  // Returns the usable size for a chunk, meaning the amount of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    const uptr Size = getBackendAllocator().getActuallyAllocatedSize(
        getBackendPtr(Ptr, Header), Header->ClassId);
    if (Size == 0)
      return 0;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the chunk pointer and its header.
  static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
                                 HeaderHolder, ARRAY_SIZE(HeaderHolder));
    return static_cast<u16>(Crc);
  }
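
  // Note: the CRC above covers the global Cookie, the chunk address, and
  // every header field except the Checksum itself. A header copied to a
  // different address, replayed from another process, or bit-flipped will
  // fail validation with high probability (bounded by the u16 truncation).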

  // Checks the validity of a chunk by verifying its checksum. It doesn't
  // trigger termination in the event of an invalid chunk.
  static INLINE bool isValid(const void *Ptr) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    UnpackedHeader NewUnpackedHeader =
        bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum ==
            computeChecksum(Ptr, &NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  static INLINE void eraseHeader(void *Ptr) {
    const PackedHeader NullPackedHeader = 0;
    atomic_store_relaxed(getAtomicHeader(Ptr), NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  static INLINE
  void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    PackedHeader NewPackedHeader =
        atomic_load_relaxed(getConstAtomicHeader(Ptr));
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
                 computeChecksum(Ptr, NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", Ptr);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  static INLINE void compareExchangeHeader(void *Ptr,
                                           UnpackedHeader *NewUnpackedHeader,
                                           UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    if (UNLIKELY(!atomic_compare_exchange_strong(
            getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
            memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", Ptr);
    }
  }
}  // namespace Chunk
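
// A typical header round-trip, as used by the deallocation path below (a
// sketch for exposition only):
//
//   UnpackedHeader Header;
//   Chunk::loadHeader(Ptr, &Header);        // dies on checksum mismatch
//   UnpackedHeader NewHeader = Header;
//   NewHeader.State = ChunkQuarantine;
//   Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);  // dies on race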

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend,
  // first making sure it hasn't been tampered with.
  void Recycle(void *Ptr) {
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Ptr);
    }
    Chunk::eraseHeader(Ptr);
    void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
    if (Header.ClassId)
      getBackendAllocator().deallocatePrimary(Cache_, BackendPtr,
                                              Header.ClassId);
    else
      getBackendAllocator().deallocateSecondary(BackendPtr);
  }

  // Internal quarantine allocation and deallocation functions. We first check
  // that the batches are indeed serviced by the Primary.
  // TODO(kostyak): figure out the best way to protect the batches.
  void *Allocate(uptr Size) {
    return getBackendAllocator().allocatePrimary(Cache_, BatchClassId);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().deallocatePrimary(Cache_, Ptr, BatchClassId);
  }

  AllocatorCache *Cache_;
  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
  const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
};

typedef Quarantine<QuarantineCallback, void> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
               sizeof(ScudoTSD::QuarantineCachePlaceHolder));

ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
  return reinterpret_cast<ScudoQuarantineCache *>(
      TSD->QuarantineCachePlaceHolder);
}

struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  typedef ReturnNullOrDieOnFailure FailureHandler;

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  u32 QuarantineChunksUpToSize;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  bool CheckRssLimit;
  uptr HardRssLimitMb;
  uptr SoftRssLimitMb;
  atomic_uint8_t RssLimitExceeded;
  atomic_uint64_t RssLastCheckedAtNS;

  explicit ScudoAllocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED) {}

  void performSanityChecks() {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    const uptr MaxPrimaryAlignment =
        1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
    const uptr MaxOffset =
        (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    const uptr LargestClassId = SizeClassMap::kLargestClassID;
    Header.ClassId = LargestClassId;
    if (Header.ClassId != LargestClassId) {
      dieWithMessage("ERROR: the largest class ID doesn't fit in the header\n");
    }
  }
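
  // Worked example with hypothetical values (the real constants depend on the
  // platform's SizeClassMap): with kMaxSize = 1 << 17, MinAlignment = 16 and
  // AlignedChunkHeaderSize = 16, MaxPrimaryAlignment is 1 << 16, so MaxOffset
  // is (65536 - 16) >> 4 = 4095, which must fit in the header's Offset
  // bitfield for the check above to pass.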

  void init() {
    SanitizerToolName = "Scudo";
    initFlags();

    performSanityChecks();

    // Check if hardware CRC32 is supported in the binary and by the platform,
    // and if so, opt for the CRC32 hardware version of the checksum.
    if (&computeHardwareCRC32 && hasHardwareCRC32())
      atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    BackendAllocator.init(common_flags()->allocator_release_to_os_interval_ms);
    HardRssLimitMb = common_flags()->hard_rss_limit_mb;
    SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
    AllocatorQuarantine.Init(
        static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
        static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
    QuarantineChunksUpToSize = getFlags()->QuarantineChunksUpToSize;
    DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
    DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
    ZeroContents = getFlags()->ZeroContents;

    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
                            /*blocking=*/false))) {
      Cookie = static_cast<u32>((NanoTime() >> 12) ^
                                (reinterpret_cast<uptr>(this) >> 4));
    }

    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
    if (CheckRssLimit)
      atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
  bool isValidPointer(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return false;
    if (!Chunk::isAligned(Ptr))
      return false;
    return Chunk::isValid(Ptr);
  }

  // Opportunistic RSS limit check. This will update the RSS limit status at
  // most every 100ms if it can; otherwise it just returns the current one.
  bool isRssLimitExceeded() {
    u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
    const u64 CurrentCheck = MonotonicNanoTime();
    if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
      return atomic_load_relaxed(&RssLimitExceeded);
    if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
                                      CurrentCheck, memory_order_relaxed))
      return atomic_load_relaxed(&RssLimitExceeded);
    // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
    //                RSS from /proc/self/statm by default. We might want to
    //                call getrusage directly, even if it's less accurate.
    const uptr CurrentRssMb = GetRSS() >> 20;
    if (HardRssLimitMb && HardRssLimitMb < CurrentRssMb) {
      Report("%s: hard RSS limit exhausted (%zdMb vs %zdMb)\n",
             SanitizerToolName, HardRssLimitMb, CurrentRssMb);
      DumpProcessMap();
      Die();
    }
    if (SoftRssLimitMb) {
      if (atomic_load_relaxed(&RssLimitExceeded)) {
        if (CurrentRssMb <= SoftRssLimitMb)
          atomic_store_relaxed(&RssLimitExceeded, false);
      } else {
        if (CurrentRssMb > SoftRssLimitMb) {
          atomic_store_relaxed(&RssLimitExceeded, true);
          Report("%s: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
                 SanitizerToolName, SoftRssLimitMb, CurrentRssMb);
        }
      }
    }
    return atomic_load_relaxed(&RssLimitExceeded);
  }
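
  // Note on the compare-exchange above: of all the threads that find the
  // timestamp stale, only the one that wins the CAS pays for the (relatively
  // expensive) GetRSS() call; the losers keep the previously cached status.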

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(Alignment > MaxAlignment))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Alignment < MinAlignment))
      Alignment = MinAlignment;
    if (UNLIKELY(Size >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();
    if (UNLIKELY(Size == 0))
      Size = 1;

    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
    if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
      return FailureHandler::OnBadRequest();

    if (CheckRssLimit && UNLIKELY(isRssLimitExceeded()))
      return FailureHandler::OnOOM();

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,
    // but the Secondary will take care of its own alignment needs.
    void *BackendPtr;
    uptr BackendSize;
    u8 ClassId;
    if (PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment)) {
      BackendSize = AlignedSize;
      ClassId = SizeClassMap::ClassID(BackendSize);
      ScudoTSD *TSD = getTSDAndLock();
      BackendPtr = BackendAllocator.allocatePrimary(&TSD->Cache, ClassId);
      TSD->unlock();
    } else {
      BackendSize = NeededSize;
      ClassId = 0;
      BackendPtr = BackendAllocator.allocateSecondary(BackendSize, Alignment);
    }
    if (UNLIKELY(!BackendPtr))
      return FailureHandler::OnOOM();

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && ClassId)
      memset(BackendPtr, 0,
             BackendAllocator.getActuallyAllocatedSize(BackendPtr, ClassId));

    UnpackedHeader Header = {};
    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + AlignedChunkHeaderSize;
    if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
      // Since the Secondary takes care of alignment, a non-aligned pointer
      // means it is from the Primary. It is also the only case where the offset
      // field of the header would be non-zero.
      DCHECK(ClassId);
      const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
      Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
      UserPtr = AlignedUserPtr;
    }
    CHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
    Header.State = ChunkAllocated;
    Header.AllocType = Type;
    if (ClassId) {
      Header.ClassId = ClassId;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and the
      // next page boundary.
      const uptr PageSize = GetPageSizeCached();
      const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    void *Ptr = reinterpret_cast<void *>(UserPtr);
    Chunk::storeHeader(Ptr, &Header);
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(Ptr, Size);
    return Ptr;
  }
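
  // Worked example with hypothetical constants: allocate(/*Size=*/100,
  // /*Alignment=*/64) with MinAlignment = 16 and AlignedChunkHeaderSize = 16
  // gives NeededSize = 112 + 16 = 128 and AlignedSize = 128 + (64 - 16) = 176.
  // If the Primary returns a 16-aligned BackendPtr, UserPtr = BackendPtr + 16
  // is rounded up to the next 64-byte boundary, and Offset records that shift
  // in multiples of MinAlignment so getBackendPtr() can undo it at free time.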

  // Place a chunk in the quarantine or directly deallocate it in the event of
  // a zero-sized quarantine, or if the size of the chunk is greater than the
  // quarantine chunk size threshold.
  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
                                   uptr Size) {
    const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) ||
        (Size > QuarantineChunksUpToSize);
    if (BypassQuarantine) {
      Chunk::eraseHeader(Ptr);
      void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
      if (Header->ClassId) {
        ScudoTSD *TSD = getTSDAndLock();
        getBackendAllocator().deallocatePrimary(&TSD->Cache, BackendPtr,
                                                Header->ClassId);
        TSD->unlock();
      } else {
        getBackendAllocator().deallocateSecondary(BackendPtr);
      }
    } else {
      // If a small memory amount was allocated with a larger alignment, we want
      // to take that into account. Otherwise the Quarantine would be filled
      // with tiny chunks, taking a lot of VA memory. This is an approximation
      // of the usable size, which allows us to not call
      // GetActuallyAllocatedSize.
      uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
      ScudoTSD *TSD = getTSDAndLock();
      AllocatorQuarantine.Put(getQuarantineCache(TSD),
                              QuarantineCallback(&TSD->Cache), Ptr,
                              EstimatedSize);
      TSD->unlock();
    }
  }

  // Deallocates a Chunk, which means either adding it to the quarantine or
  // directly returning it to the backend if criteria are met.
  void deallocate(void *Ptr, uptr DeleteSize, AllocType Type) {
    // For a deallocation, we only ensure minimal initialization, meaning thread
    // local data will be left uninitialized for now (when using ELF TLS). The
    // fallback cache will be used instead. This is a workaround for a situation
    // where the only heap operation performed in a thread would be a free past
    // the TLS destructors, ending up in initialized thread specific data never
    // being destroyed properly. Any other heap operation will do a full init.
    initThreadMaybe(/*MinimalInit=*/true);
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(Ptr);
    if (UNLIKELY(!Ptr))
      return;
    if (UNLIKELY(!Chunk::isAligned(Ptr))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", Ptr);
    }
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", Ptr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (Header.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (Header.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch when deallocating "
                         "address %p\n", Ptr);
        }
      }
    }
    uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
        Chunk::getUsableSize(Ptr, &Header) - Header.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Ptr);
      }
    }
    quarantineOrDeallocateChunk(Ptr, &Header, Size);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    if (UNLIKELY(!Chunk::isAligned(OldPtr))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    UnpackedHeader OldHeader;
    Chunk::loadHeader(OldPtr, &OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (DeallocationTypeMismatch) {
      if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
        dieWithMessage("ERROR: allocation type mismatch when reallocating "
                       "address %p\n", OldPtr);
      }
    }
    const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
      Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
    }
    return NewPtr;
  }
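
  // For instance, shrinking a chunk in place only refreshes SizeOrUnusedBytes
  // (keeping sized delete and usable-size queries coherent) and re-checksums
  // the header; no memory moves. Growing past UsableSize takes the
  // allocate-copy-quarantine path above.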

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (UNLIKELY(!Ptr))
      return 0;
    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk::getUsableSize(Ptr, &Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size)))
      return FailureHandler::OnBadRequest();
    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoTSD *TSD) {
    AllocatorQuarantine.Drain(getQuarantineCache(TSD),
                              QuarantineCallback(&TSD->Cache));
    BackendAllocator.destroyCache(&TSD->Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    BackendAllocator.getStats(stats);
    return stats[StatType];
  }

  void *handleBadRequest() {
    initThreadMaybe();
    return FailureHandler::OnBadRequest();
  }

  void setRssLimit(uptr LimitMb, bool HardLimit) {
    if (HardLimit)
      HardRssLimitMb = LimitMb;
    else
      SoftRssLimitMb = LimitMb;
    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
  }
};

static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

void initScudo() {
  Instance.init();
}

void ScudoTSD::init(bool Shared) {
  UnlockRequired = Shared;
  getBackendAllocator().initCache(&Cache);
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoTSD::commitBack() {
  Instance.commitBack(this);
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type));
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}

void *scudoValloc(uptr Size) {
  return SetErrnoOnNull(
      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
    errno = ENOMEM;
    return Instance.handleBadRequest();
  }
  // pvalloc(0) should allocate one page.
  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
    errno = EINVAL;
    return Instance.handleBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign));
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
    Instance.handleBadRequest();
    return EINVAL;
  }
  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
  if (UNLIKELY(!Ptr))
    return ENOMEM;
  *MemPtr = Ptr;
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
    errno = EINVAL;
    return Instance.handleBadRequest();
  }
  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}
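
// For reference: the C11 aligned_alloc contract (which the check above is
// meant to enforce) requires a power-of-two Alignment and a Size that is a
// multiple of Alignment; anything else fails with EINVAL above rather than
// being silently rounded.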

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
  return Size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

// Interface functions

void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
  if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
    return;
  Instance.setRssLimit(LimitMb, !!HardLimit);
}