//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_tls.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>
#include <string.h>

namespace __scudo {

// Global static cookie, initialized at start-up.
static uptr Cookie;

// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };

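// Hardware CRC32 implementation, provided by scudo_crc32.cpp when the target
// supports it. Declared weak so that it may be left undefined, in which case
// the software version below is used.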
SANITIZER_WEAK_ATTRIBUTE u32 computeHardwareCRC32(u32 Crc, uptr Data);

INLINE u32 computeCRC32(u32 Crc, uptr Data, u8 HashType) {
  // If SSE4.2 is defined here, it was enabled everywhere, as opposed to only
  // for scudo_crc32.cpp. This means that other SSE instructions were likely
  // emitted at other places, and as a result there is no reason to not use
  // the hardware version of the CRC32.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
  return computeHardwareCRC32(Crc, Data);
#else
  if (computeHardwareCRC32 && HashType == CRC32Hardware)
    return computeHardwareCRC32(Crc, Data);
  else
    return computeSoftwareCRC32(Crc, Data);
#endif  // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}

static ScudoBackendAllocator &getBackendAllocator();

struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it hasn't been tampered with. To
  // prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // Returns the usable size for a chunk, meaning the number of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
  uptr getUsableSize(UnpackedHeader *Header) {
    uptr Size = getBackendAllocator().GetActuallyAllocatedSize(
        getAllocBeg(Header));
    if (Size == 0)
      return 0;
    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Compute the checksum of the Chunk pointer and its ChunkHeader.
  u16 computeChecksum(UnpackedHeader *Header) const {
    UnpackedHeader ZeroChecksumHeader = *Header;
    ZeroChecksumHeader.Checksum = 0;
    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
    u8 HashType = atomic_load_relaxed(&HashAlgorithm);
    u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(this), HashType);
    for (uptr i = 0; i < ARRAY_SIZE(HeaderHolder); i++)
      Crc = computeCRC32(Crc, HeaderHolder[i], HashType);
    return static_cast<u16>(Crc);
  }

  // Checks the validity of a chunk by verifying its checksum. It does not
  // terminate the process in the event of an invalid chunk.
  bool isValid() {
    UnpackedHeader NewUnpackedHeader;
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
  }

  // Nulls out a chunk header. When returning the chunk to the backend, there
  // is no need to store a valid ChunkAvailable header, as this would be
  // computationally expensive. Zeroing out serves the same purpose by making
  // the header invalid. In the extremely rare event where 0 would be a valid
  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
  COMPILER_CHECK(ChunkAvailable == 0);
  void eraseHeader() {
    PackedHeader NullPackedHeader = 0;
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NullPackedHeader);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if (UNLIKELY(NewUnpackedHeader->Checksum !=
        computeChecksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    atomic_store_relaxed(AtomicHeader, NewPackedHeader);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader,
                                                 &OldPackedHeader,
                                                 NewPackedHeader,
                                                 memory_order_relaxed))) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

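// Returns the ScudoChunk whose header immediately precedes the user pointer
// UserBeg.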
ScudoChunk *getScudoChunk(uptr UserBeg) {
  return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
}

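// Allocator options gathered from the Scudo specific flags and the common
// sanitizer flags, and copied back to them on request.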
struct AllocatorOptions {
  u32 QuarantineSizeMb;
  u32 ThreadLocalQuarantineSizeKb;
  bool MayReturnNull;
  s32 ReleaseToOSIntervalMs;
  bool DeallocationTypeMismatch;
  bool DeleteSizeMismatch;
  bool ZeroContents;

  void setFrom(const Flags *f, const CommonFlags *cf);
  void copyTo(Flags *f, CommonFlags *cf) const;
};

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

static void initScudoInternal(const AllocatorOptions &Options);

static bool ScudoInitIsRunning = false;

void initScudo() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  // Check if SSE4.2 is supported, if so, opt for the CRC32 hardware version.
  if (testCPUFeature(CRC32CPUFeature)) {
    atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
  }

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initScudoInternal(Options);

  // TODO(kostyak): determine if MaybeStartBackgroundThread could be of some
  // use.

  ScudoInitIsRunning = false;
}

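// Callbacks passed to the quarantine: they recycle quarantined chunks back to
// the backend, and service the quarantine's internal allocations.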
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (UNLIKELY(Header.State != ChunkQuarantine)) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    Chunk->eraseHeader();
    void *Ptr = Chunk->getAllocBeg(&Header);
    getBackendAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // TODO(kostyak): figure out the best way to protect the batches.
    return getBackendAllocator().Allocate(Cache_, Size, MinAlignment);
  }

  void Deallocate(void *Ptr) {
    getBackendAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
               sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));

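// Helpers returning the allocator cache, quarantine cache and PRNG stored in
// a thread's ScudoThreadContext.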
AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Cache;
}

ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
  return reinterpret_cast<
      ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
}

Xorshift128Plus *getPrng(ScudoThreadContext *ThreadContext) {
  return &ThreadContext->Prng;
}

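// The Scudo frontend: wraps the backend (combined) allocator, the quarantine,
// and the fallback caches used when no thread context is available.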
struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they can
  // be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  ScudoQuarantineCache FallbackQuarantineCache;
  Xorshift128Plus FallbackPrng;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit ScudoAllocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
        SizeClassMap::kMaxSize - MinAlignment);
    uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
        MinAlignmentLog;
    Header.Offset = MaxOffset;
    if (Header.Offset != MaxOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
    // Verify that we can fit the maximum size or amount of unused bytes in the
    // header. Given that the Secondary fits the allocation to a page, the worst
    // case scenario happens in the Primary. It will depend on the second to
    // last and last class sizes, as well as the dynamic base for the Primary.
    // The following is an over-approximation that works for our needs.
    uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
    if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
      dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
                     "the header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    FallbackPrng.initFromURandom();
    Cookie = FallbackPrng.getNext();
  }

  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
  bool isValidPointer(const void *UserPtr) {
    initThreadMaybe();
    if (!UserPtr)
      return false;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(UserBeg, MinAlignment))
      return false;
    return getScudoChunk(UserBeg)->isValid();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type,
                 bool ForceZeroContents = false) {
    initThreadMaybe();
    if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
      dieWithMessage("ERROR: alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Size == 0)
      Size = 1;

    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
    if (Alignment > MinAlignment)
      NeededSize += Alignment;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();

    // Primary backed and Secondary backed allocations have a different
    // treatment. We deal with alignment requirements of Primary serviced
    // allocations here, but the Secondary will take care of its own alignment
    // needs, which means we also have to work around some limitations of the
    // combined allocator to accommodate the situation.
    bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);

    void *Ptr;
    uptr Salt;
    uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
    ScudoThreadContext *ThreadContext = getThreadContext();
    if (LIKELY(ThreadContext)) {
      Salt = getPrng(ThreadContext)->getNext();
      Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
                                      NeededSize, AllocationAlignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Salt = FallbackPrng.getNext();
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      AllocationAlignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDieOnOOM();

    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    // If the allocation was serviced by the secondary, the returned pointer
    // accounts for ChunkHeaderSize to pass the alignment check of the combined
    // allocator. Adjust it here.
    if (!FromPrimary) {
      AllocBeg -= AlignedChunkHeaderSize;
      if (Alignment > MinAlignment)
        NeededSize -= Alignment;
    }

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && FromPrimary)
      memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));

    uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
    if (!IsAligned(UserBeg, Alignment))
      UserBeg = RoundUpTo(UserBeg, Alignment);
    CHECK_LE(UserBeg + Size, AllocBeg + NeededSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
    uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
    Header.Offset = Offset >> MinAlignmentLog;
    Header.AllocType = Type;
    if (FromPrimary) {
      Header.FromPrimary = FromPrimary;
      Header.SizeOrUnusedBytes = Size;
    } else {
      // The secondary fits the allocations to a page, so the amount of unused
      // bytes is the difference between the end of the user allocation and the
      // next page boundary.
      uptr PageSize = GetPageSizeCached();
      uptr TrailingBytes = (UserBeg + Size) & (PageSize - 1);
      if (TrailingBytes)
        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
    }
    Header.Salt = static_cast<u8>(Salt);
    getScudoChunk(UserBeg)->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(UserBeg);
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
  // we directly deallocate the chunk, otherwise the flow would lead to the
  // chunk being loaded (and checked) twice, and stored (and checksummed) once,
  // with no additional security value.
  void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                   uptr Size) {
    bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
    if (BypassQuarantine) {
      Chunk->eraseHeader();
      void *Ptr = Chunk->getAllocBeg(Header);
      ScudoThreadContext *ThreadContext = getThreadContext();
      if (LIKELY(ThreadContext)) {
        getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr);
      } else {
        SpinMutexLock Lock(&FallbackMutex);
        getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
      }
    } else {
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, Header);
      ScudoThreadContext *ThreadContext = getThreadContext();
      if (LIKELY(ThreadContext)) {
        AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
                                QuarantineCallback(
                                    getAllocatorCache(ThreadContext)),
                                Chunk, Size);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, Size);
      }
    }
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    initThreadMaybe();
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", UserPtr);
    }
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (OldHeader.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (OldHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         UserPtr);
        }
      }
    }
    uptr Size = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
        Chunk->getUsableSize(&OldHeader) - OldHeader.SizeOrUnusedBytes;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       UserPtr);
      }
    }

    // If a small memory amount was allocated with a larger alignment, we want
    // to take that into account. Otherwise the Quarantine would be filled with
    // tiny chunks, taking a lot of VA memory. This is an approximation of the
    // usable size, that allows us to not call GetActuallyAllocatedSize.
505 uptr LiableSize = Size + (OldHeader.Offset << MinAlignment);
    quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    initThreadMaybe();
    uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
                     "aligned at address %p\n", OldPtr);
    }
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when reallocating address "
                     "%p\n", OldPtr);
    }
    if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     OldPtr);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize);
    }
    return NewPtr;
  }

  // Helper function that returns the actual usable size of a chunk.
  uptr getUsableSize(const void *Ptr) {
    initThreadMaybe();
    if (!Ptr)
      return 0;
    uptr UserBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk = getScudoChunk(UserBeg);
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (UNLIKELY(Header.State != ChunkAllocated)) {
      dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                     Ptr);
    }
    return Chunk->getUsableSize(&Header);
  }

  void *calloc(uptr NMemB, uptr Size) {
    initThreadMaybe();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB)  // Overflow check
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    return allocate(Total, MinAlignment, FromMalloc, true);
  }

  void commitBack(ScudoThreadContext *ThreadContext) {
    AllocatorCache *Cache = getAllocatorCache(ThreadContext);
    AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
                              QuarantineCallback(Cache));
    BackendAllocator.DestroyCache(Cache);
  }

  uptr getStats(AllocatorStat StatType) {
    initThreadMaybe();
    uptr stats[AllocatorStatCount];
    BackendAllocator.GetStats(stats);
    return stats[StatType];
  }
};

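// The unique static instance of the Scudo allocator, linker-initialized so
// that it is usable before constructors run.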
static ScudoAllocator Instance(LINKER_INITIALIZED);

static ScudoBackendAllocator &getBackendAllocator() {
  return Instance.BackendAllocator;
}

static void initScudoInternal(const AllocatorOptions &Options) {
  Instance.init(Options);
}

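// Per-thread initialization and tear-down of the Scudo thread context
// (allocator cache, PRNG and quarantine cache).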
void ScudoThreadContext::init() {
  getBackendAllocator().InitCache(&Cache);
  Prng.initFromURandom();
  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}

void ScudoThreadContext::commitBack() {
  Instance.commitBack(this);
}

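// Entry points to the allocator instance, used by the malloc/free and
// new/delete interceptors.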
void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // Size must be a multiple of the alignment. To avoid a division, we first
  // make sure that alignment is a power of 2.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

}  // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  return Instance.getStats(AllocatorStatAllocated);
}

uptr __sanitizer_get_heap_size() {
  return Instance.getStats(AllocatorStatMapped);
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *Ptr) {
  return Instance.isValidPointer(Ptr);
}

uptr __sanitizer_get_allocated_size(const void *Ptr) {
  return Instance.getUsableSize(Ptr);
}