//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>
#include <smmintrin.h>

#include <cstring>

namespace __scudo {

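// Parameters for the primary (size-class based) backend allocator. A kSpaceBeg
// of ~0ULL lets the allocator choose its base address dynamically instead of
// mapping a fixed one, kSpaceSize reserves a 1 TB region for it, a zero
// kMetadataSize means no per-chunk metadata is kept by the backend, and
// kRandomShuffleChunks randomizes the order in which chunks of a size class
// are handed out.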
struct AP {
  static const uptr kSpaceBeg = ~0ULL;
  static const uptr kSpaceSize = 0x10000000000ULL;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};

typedef SizeClassAllocator64<AP> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
    ScudoAllocator;
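// The combined allocator dispatches small requests to the size-class based
// primary allocator and large ones to the mmap based secondary allocator, with
// the thread-local cache fronting the primary.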

static ScudoAllocator &getAllocator();

static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static u64 Cookie;
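// The cookie is folded into every chunk checksum: computeChecksum() below
// seeds its CRC with it, along with the chunk address and the header contents,
// so a valid checksum cannot be forged without knowing this per-process
// secret, which is drawn from the PRNG in Allocator::init().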

struct ScudoChunk : UnpackedHeader {
  // We can't use the Offset member of the chunk itself, as we would
  // double-fetch it without any guarantee that it hasn't been tampered with.
  // To prevent this, we work with a local copy of the header.
  void *getAllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // CRC32 checksum of the Chunk pointer and its ChunkHeader.
  // It currently uses the Intel Nehalem SSE4.2 crc32 64-bit instruction.
  u16 computeChecksum(UnpackedHeader *Header) const {
    u64 HeaderHolder[2];
    memcpy(HeaderHolder, Header, sizeof(HeaderHolder));
    u64 Crc = _mm_crc32_u64(Cookie, reinterpret_cast<uptr>(this));
    // This is somewhat of a shortcut. The checksum is stored in the 16 least
    // significant bits of the first 8 bytes of the header, hence zero-ing
    // those bits out. It would be more valid to zero the checksum field of the
    // UnpackedHeader, but would require holding an additional copy of it.
    Crc = _mm_crc32_u64(Crc, HeaderHolder[0] & 0xffffffffffff0000ULL);
    Crc = _mm_crc32_u64(Crc, HeaderHolder[1]);
    return static_cast<u16>(Crc);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if ((NewUnpackedHeader->Unused_0_ != 0) ||
        (NewUnpackedHeader->Unused_1_ != 0) ||
        (NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected provided one to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = computeChecksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
                                               NewPackedHeader,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed)) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};
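// A minimal sketch of the header transaction pattern used throughout the
// allocator below (load a verified copy, mutate it locally, then publish it
// while detecting concurrent tampering):
//   UnpackedHeader Old, New;
//   Chunk->loadHeader(&Old);                   // checksum-verified local copy
//   New = Old;
//   New.State = ChunkQuarantine;               // mutate the local copy only
//   Chunk->compareExchangeHeader(&New, &Old);  // dies if the header was raced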

static bool ScudoInitIsRunning = false;

static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t pkey;

static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;

static void teardownThread(void *p) {
  uptr v = reinterpret_cast<uptr>(p);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and destroying the thread's allocator cache.
  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
    pthread_setspecific(pkey, reinterpret_cast<void *>(v + 1));
    return;
  }
  drainQuarantine();
  getAllocator().DestroyCache(&Cache);
  ThreadTornDown = true;
}

static void initInternal() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initAllocator(Options);

  MaybeStartBackgroudThread();

  ScudoInitIsRunning = false;
}

static void initGlobal() {
  pthread_key_create(&pkey, teardownThread);
  initInternal();
}

static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
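  // Seed the TSD destructor counter at 1: as long as the value passed to
  // teardownThread() is below PTHREAD_DESTRUCTOR_ITERATIONS, it re-arms itself
  // with the incremented value, postponing the actual teardown (quarantine
  // drain and cache destruction) to the last destructor iteration.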
  pthread_setspecific(pkey, reinterpret_cast<void *>(1));
  getAllocator().InitCache(&Cache);
  ThreadInited = true;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (Header.State != ChunkQuarantine) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    void *Ptr = Chunk->getAllocBeg(&Header);
    getAllocator().Deallocate(Cache_, Ptr);
  }

  /// Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // The internal quarantine memory cannot be protected by us. But the only
    // structures allocated are QuarantineBatch, which are 8KB for x64. So we
    // would use mmap for those, and given that Deallocate doesn't pass a size
    // in, we would enforce the size of the allocation to be
    // sizeof(QuarantineBatch).
    // TODO(kostyak): switching to mmap greatly impacts performance, we have
    // to find another solution.
    // CHECK_EQ(Size, sizeof(QuarantineBatch));
    // return MmapOrDie(Size, "QuarantineBatch");
    return getAllocator().Allocate(Cache_, Size, 1, false);
  }

  void Deallocate(void *Ptr) {
    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
    getAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache QuarantineCache;
static thread_local QuarantineCache ThreadQuarantineCache;
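// Freed chunks are not recycled immediately: they are put in the quarantine
// (per-thread cache first, then the global one), and only once the configured
// size limits are exceeded does the quarantine call Recycle() above to return
// the oldest chunks to the backend, delaying reuse of freed memory.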

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

struct Allocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  ScudoAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread-local caches have been
  // 'destroyed' on thread tear-down. They are protected by a mutex as they can
  // be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  QuarantineCache FallbackQuarantineCache;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit Allocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Currently SSE 4.2 support is required. This might change later.
    CHECK(testCPUFeature(SSE4_2)); // for crc32

    // Verify that the header offset field can hold the maximum offset. In the
    // case of the Secondary allocator, it takes care of alignment and the
    // offset will always be 0. In the case of the Primary, the worst case
    // scenario happens in the last size class, when the backend allocation
    // would already be aligned on the requested alignment, which would happen
    // to be the maximum alignment that would fit in that size class. As a
    // result, the maximum offset will be at most the maximum alignment for the
    // last size class minus the header size, in multiples of MinAlignment.
    UnpackedHeader Header = {};
    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
        PrimaryAllocator::SizeClassMap::kMaxSize - MinAlignment);
    uptr MaximumOffset = (MaxPrimaryAlignment - ChunkHeaderSize) >>
        MinAlignmentLog;
    Header.Offset = MaximumOffset;
    if (Header.Offset != MaximumOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }
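    // As a rough worked example (assuming a 128KB largest primary size class,
    // a 16-byte MinAlignment and an 8-byte packed header): MaxPrimaryAlignment
    // would be 64KB and MaximumOffset (64KB - 8) >> 4 = 4095, which is the
    // largest value the Offset bit-field has to be able to represent.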

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    Cookie = Prng.Next();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!IsPowerOfTwo(Alignment)) {
      dieWithMessage("ERROR: alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    uptr RoundedSize = RoundUpTo(Size, MinAlignment);
    uptr NeededSize = RoundedSize + ChunkHeaderSize;
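    // For larger-than-minimum alignments, reserve additional slack so that a
    // suitably aligned user pointer, preceded by its header, is guaranteed to
    // fit inside the backend allocation.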
    if (Alignment > MinAlignment)
      NeededSize += Alignment;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);

    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      FromPrimary ? MinAlignment : Alignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDieOnOOM();

    // If requested, we will zero out the entire contents of the returned
    // chunk.
    if (ZeroContents && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));

    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    // If the allocation was serviced by the secondary, the returned pointer
    // accounts for ChunkHeaderSize to pass the alignment check of the combined
    // allocator. Adjust it here.
    if (!FromPrimary)
      AllocBeg -= ChunkHeaderSize;
    uptr ChunkBeg = AllocBeg + ChunkHeaderSize;
    if (!IsAligned(ChunkBeg, Alignment))
      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
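    // The offset is stored in multiples of MinAlignment and measures the
    // distance from the start of the backend allocation to the chunk header;
    // getAllocBeg() reverses this computation when the chunk is released.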
    Header.Offset = (ChunkBeg - ChunkHeaderSize - AllocBeg) >> MinAlignmentLog;
    Header.AllocType = Type;
    Header.RequestedSize = Size;
    Header.Salt = static_cast<u16>(Prng.Next());
    Chunk->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
    // TODO(kostyak): hooks sound like a terrible idea security-wise but might
    //                be needed for things to work properly?
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // TODO(kostyak): see hook comment above
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (NewHeader.AllocType != Type) {
        // With the exception of memalign'd chunks, which can still be free'd.
        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         Chunk);
        }
      }
    }
    uptr Size = NewHeader.RequestedSize;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Chunk);
      }
    }
    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, Size);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, Size);
    }
  }

  // Returns the actual usable size of a chunk. Since this requires loading the
  // header, we will return it in the second parameter, as it can be required
  // by the caller to perform additional processing.
  uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!Ptr)
      return 0;
    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    Chunk->loadHeader(Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (Header->State != ChunkAllocated) {
      dieWithMessage("ERROR: attempted to size a non-allocated chunk at "
                     "address %p\n", Chunk);
    }
    uptr Size =
        BackendAllocator.GetActuallyAllocatedSize(Chunk->getAllocBeg(Header));
    // UsableSize works as malloc_usable_size, which is also what (AFAIU)
    // tcmalloc's MallocExtension::GetAllocatedSize aims at providing. This
    // means we will return the size of the chunk from the user beginning to
    // the end of the 'user' allocation, hence the subtraction of the header
    // size and the offset from the backend size.
    if (Size == 0)
      return Size;
    return Size - ChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Helper function that doesn't care about the header.
  uptr getUsableSize(const void *Ptr) {
    UnpackedHeader Header;
    return getUsableSize(Ptr, &Header);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    UnpackedHeader OldHeader;
    uptr Size = getUsableSize(OldPtr, &OldHeader);
    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    if (OldHeader.AllocType != FromMalloc) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk.
    if (NewSize <= Size) {
      NewHeader.RequestedSize = NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.RequestedSize;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, OldSize);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, OldSize);
      }
    }
    return NewPtr;
  }

  void *calloc(uptr NMemB, uptr Size) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr Total = NMemB * Size;
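    // The division below catches multiplications that wrapped around: for
    // instance, on 64-bit, NMemB == (1ULL << 33) and Size == (1ULL << 33)
    // makes Total wrap to a small value, and Total / Size != NMemB exposes it.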
    if (Size != 0 && Total / Size != NMemB) // Overflow check
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
    // If ZeroContents, the content of the chunk has already been zero'd out.
    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, getUsableSize(Ptr));
    return Ptr;
  }

  void drainQuarantine() {
    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache));
  }
};

static Allocator Instance(LINKER_INITIALIZED);

static ScudoAllocator &getAllocator() {
  return Instance.BackendAllocator;
}

void initAllocator(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void drainQuarantine() {
  Instance.drainQuarantine();
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // Size must be a multiple of Alignment. To avoid a division, we first make
  // sure that Alignment is a power of 2.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

} // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return Instance.getUsableSize(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return Instance.getUsableSize(p);
}