//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Header for scudo_allocator.cpp.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_H_
#define SCUDO_ALLOCATOR_H_

#include "sanitizer_common/sanitizer_allocator.h"

#if !SANITIZER_LINUX
# error "The Scudo hardened allocator is currently only supported on Linux."
#endif

namespace __scudo {

enum AllocType : u8 {
  FromMalloc   = 0, // Memory block came from malloc, realloc, calloc, etc.
  FromNew      = 1, // Memory block came from operator new.
  FromNewArray = 2, // Memory block came from operator new [].
  FromMemalign = 3, // Memory block came from memalign, posix_memalign, etc.
};

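// The possible states of a chunk: allocation takes it from ChunkAvailable to
// ChunkAllocated, deallocation moves it to ChunkQuarantine, and recycling of
// the quarantine takes it back to ChunkAvailable.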
enum ChunkState : u8 {
  ChunkAvailable  = 0,
  ChunkAllocated  = 1,
  ChunkQuarantine = 2
};

// Our header requires 64 bits of storage. Having the offset saves us from
// using functions such as GetBlockBegin, which is fairly costly. Our first
// implementation used the MetaData as well, which offers the advantage of
// being stored away from the chunk itself, but accessing it was equally
// costly. The header will be atomically loaded and stored.
typedef u64 PackedHeader;
struct UnpackedHeader {
  u64 Checksum          : 16;
  u64 SizeOrUnusedBytes : 19; // Size for Primary backed allocations, amount of
                              //   unused bytes in the chunk for Secondary ones.
  u64 FromPrimary       : 1;
  u64 State             : 2;  // available, allocated, or quarantined
  u64 AllocType         : 2;  // malloc, new, new[], or memalign
  u64 Offset            : 16; // Offset from the beginning of the backend
                              //   allocation to the beginning of the chunk
                              //   itself, in multiples of MinAlignment. See
                              //   comment about its maximum value and in init().
  u64 Salt              : 8;
};

typedef atomic_uint64_t AtomicPackedHeader;
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
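// A minimal sketch of the intended access pattern, assuming helpers along the
// lines of those in scudo_allocator.cpp: load the 64 bits atomically, then
// reinterpret them as the bitfield struct:
//   PackedHeader Packed = atomic_load_relaxed(AtomicHeaderPtr);
//   UnpackedHeader Header = bit_cast<UnpackedHeader>(Packed);
// where AtomicHeaderPtr is a (hypothetical) AtomicPackedHeader * pointing at
// the header of a chunk.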

// Minimum alignment of 8 bytes for 32-bit, 16 bytes for 64-bit.
const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
const uptr MaxAlignmentLog = 24;  // 16 MB
const uptr MinAlignment = 1 << MinAlignmentLog;
const uptr MaxAlignment = 1 << MaxAlignmentLog;

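// The header is prepended to the user allocation; rounding its size up to a
// multiple of MinAlignment keeps the pointer returned to the user aligned.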
const uptr ChunkHeaderSize = sizeof(PackedHeader);
const uptr AlignedChunkHeaderSize =
    (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);

#if SANITIZER_CAN_USE_ALLOCATOR64
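// A kSpaceBeg of ~0ULL instructs the Primary to map its region at a dynamic
// base address chosen at initialization, rather than at a fixed one.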
const uptr AllocatorSpace = ~0ULL;
# if defined(__aarch64__) && SANITIZER_ANDROID
const uptr AllocatorSize = 0x4000000000ULL;   // 256G.
# elif defined(__aarch64__)
const uptr AllocatorSize = 0x10000000000ULL;  // 1T.
# else
const uptr AllocatorSize = 0x40000000000ULL;  // 4T.
# endif
typedef DefaultSizeClassMap SizeClassMap;
struct AP64 {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else
// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
// security improvements brought to the 64-bit one. This makes the 32-bit
// version of Scudo slightly less hardened.
static const uptr RegionSizeLog = 20;
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
typedef DefaultSizeClassMap SizeClassMap;
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __scudo::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = RegionSizeLog;
  typedef __scudo::ByteMap ByteMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

// __sanitizer::RoundUp has a CHECK that is extraneous for us. Use our own.
INLINE uptr RoundUpTo(uptr Size, uptr Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}
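// As with __sanitizer::RoundUpTo, Boundary is expected to be a power of two
// for the mask arithmetic above to be correct.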

#include "scudo_allocator_secondary.h"
#include "scudo_allocator_combined.h"

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef ScudoLargeMmapAllocator SecondaryAllocator;
typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
                               SecondaryAllocator> ScudoBackendAllocator;
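// The combined backend services a request from the size classed Primary when
// the size (and alignment) fits a size class, and from the mmap based
// Secondary otherwise; see scudo_allocator_combined.h for the dispatch logic.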

void initScudo();

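// C level entry points to the allocator, called by the malloc family
// interceptors and by the C++ operator new and delete implementations.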
void *scudoMalloc(uptr Size, AllocType Type);
void scudoFree(void *Ptr, AllocType Type);
void scudoSizedFree(void *Ptr, uptr Size, AllocType Type);
void *scudoRealloc(void *Ptr, uptr Size);
void *scudoCalloc(uptr NMemB, uptr Size);
void *scudoMemalign(uptr Alignment, uptr Size);
void *scudoValloc(uptr Size);
void *scudoPvalloc(uptr Size);
int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
void *scudoAlignedAlloc(uptr Alignment, uptr Size);
uptr scudoMallocUsableSize(void *Ptr);

}  // namespace __scudo

#endif  // SCUDO_ALLOCATOR_H_