Kostya Serebryany | 712fc98 | 2016-06-07 01:20:26 +0000 | [diff] [blame] | 1 | //===-- scudo_allocator.h ---------------------------------------*- C++ -*-===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | /// |
| 10 | /// Header for scudo_allocator.cpp. |
| 11 | /// |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
| 14 | #ifndef SCUDO_ALLOCATOR_H_ |
| 15 | #define SCUDO_ALLOCATOR_H_ |
| 16 | |
Kostya Kortchinsky | b59abb2 | 2017-09-26 17:20:02 +0000 | [diff] [blame] | 17 | #include "scudo_platform.h" |
Kostya Kortchinsky | b39dff4 | 2017-01-18 17:11:17 +0000 | [diff] [blame] | 18 | |
Kostya Serebryany | 712fc98 | 2016-06-07 01:20:26 +0000 | [diff] [blame] | 19 | namespace __scudo { |
| 20 | |
// Identifies which allocation API family a chunk was requested through. It is
// stored in the chunk header (see UnpackedHeader::AllocType below) and passed
// to the deallocation entry points, presumably so that mismatched
// allocation/deallocation pairs can be detected -- confirm in the .cpp.
enum AllocType : u8 {
  FromMalloc = 0,    // Memory block came from malloc, realloc, calloc, etc.
  FromNew = 1,       // Memory block came from operator new.
  FromNewArray = 2,  // Memory block came from operator new [].
  FromMemalign = 3,  // Memory block came from memalign, posix_memalign, etc.
};
| 27 | |
// Lifecycle state of a chunk, stored in UnpackedHeader::State (2 bits).
enum ChunkState : u8 {
  ChunkAvailable = 0,   // Chunk is not currently handed out to the user.
  ChunkAllocated = 1,   // Chunk is live, owned by the user.
  ChunkQuarantine = 2   // Chunk was freed and is held in quarantine before
                        // being made available again.
};
| 33 | |
// Our header requires 64 bits of storage. Having the offset saves us from
// using functions such as GetBlockBegin, which is fairly costly. Our first
// implementation used the MetaData as well, which offers the advantage of
// being stored away from the chunk itself, but accessing it was costly as
// well. The header will be atomically loaded and stored.
typedef u64 PackedHeader;
// Bitfield view of the packed header. The field widths must add up to exactly
// 64 bits; this is enforced by the COMPILER_CHECK below.
struct UnpackedHeader {
  u64 Checksum : 16;          // Checksum guarding the header contents against
                              // corruption or tampering.
  u64 ClassId : 8;            // Primary size class of the allocation; the
                              // exact meaning of 0 (likely a Secondary backed
                              // allocation) should be confirmed in the .cpp.
  u64 SizeOrUnusedBytes : 20; // Size for Primary backed allocations, amount of
                              // unused bytes in the chunk for Secondary ones.
  u64 State : 2;              // available, allocated, or quarantined
  u64 AllocType : 2;          // malloc, new, new[], or memalign
  u64 Offset : 16;            // Offset from the beginning of the backend
                              // allocation to the beginning of the chunk
                              // itself, in multiples of MinAlignment. See
                              // comment about its maximum value and in init().
};

// The packed form is what is actually read and written: 64-bit atomics keep
// header accesses tear-free, as stated in the comment above.
typedef atomic_uint64_t AtomicPackedHeader;
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
| 55 | |
// Alignment bounds for user allocations, expressed both as log2 and as the
// corresponding byte values.
// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
const uptr MaxAlignmentLog = 24;  // 16 MB
const uptr MinAlignment = 1 << MinAlignmentLog;
const uptr MaxAlignment = 1 << MaxAlignmentLog;
| 61 | |
Kostya Kortchinsky | beeea62 | 2018-02-27 16:14:49 +0000 | [diff] [blame^] | 62 | // constexpr version of __sanitizer::RoundUp without the extraneous CHECK. |
| 63 | // This way we can use it in constexpr variables and functions declarations. |
| 64 | constexpr uptr RoundUpTo(uptr Size, uptr Boundary) { |
| 65 | return (Size + Boundary - 1) & ~(Boundary - 1); |
| 66 | } |
| 67 | |
namespace Chunk {
  // Size of the in-chunk header, rounded up to MinAlignment so that the user
  // data following it keeps the minimum alignment.
  constexpr uptr getHeaderSize() {
    return RoundUpTo(sizeof(PackedHeader), MinAlignment);
  }
}
Kostya Kortchinsky | 1148dc5 | 2016-11-30 17:32:20 +0000 | [diff] [blame] | 73 | |
#if SANITIZER_CAN_USE_ALLOCATOR64
// ~0 as the space beginning: presumably lets SizeClassAllocator64 choose the
// base address dynamically -- confirm against sanitizer_allocator_primary64.h.
const uptr AllocatorSpace = ~0ULL;
// Template parameters for the 64-bit Primary (SizeClassAllocator64).
struct AP64 {
  static const uptr kSpaceBeg = AllocatorSpace;
  static const uptr kSpaceSize = AllocatorSize;
  static const uptr kMetadataSize = 0;  // Per-chunk metadata is not used.
  typedef __scudo::SizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  // Randomize the order chunks are handed out in, for hardening.
  static const uptr kFlags =
      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else
// 32-bit Primary (SizeClassAllocator32): regions are tracked with a byte map
// spanning the whole mmap range; pick a flat or two-level map depending on
// how many regions that range holds on this word size.
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<NumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
# endif  // SANITIZER_WORDSIZE
// Template parameters for the 32-bit Primary.
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;  // Per-chunk metadata is not used.
  typedef __scudo::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = RegionSizeLog;
  typedef __scudo::ByteMap ByteMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  // Randomize chunk order for hardening; keep TransferBatches in their own
  // size class rather than mixed with user chunks.
  static const uptr kFlags =
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
Kostya Serebryany | 712fc98 | 2016-06-07 01:20:26 +0000 | [diff] [blame] | 107 | |
Kostya Kortchinsky | 36b3434 | 2017-04-27 20:21:16 +0000 | [diff] [blame] | 108 | #include "scudo_allocator_secondary.h" |
Kostya Kortchinsky | 01a66fc | 2017-05-11 21:40:45 +0000 | [diff] [blame] | 109 | #include "scudo_allocator_combined.h" |
Kostya Kortchinsky | 36b3434 | 2017-04-27 20:21:16 +0000 | [diff] [blame] | 110 | |
// Per-thread cache fronting the Primary.
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
// Secondary handles allocations the Primary cannot serve (large sizes or
// large alignments), via mmap.
typedef ScudoLargeMmapAllocator SecondaryAllocator;
// The combined backend dispatches between Primary and Secondary.
typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
                               SecondaryAllocator> ScudoBackendAllocator;
Kostya Kortchinsky | 36b3434 | 2017-04-27 20:21:16 +0000 | [diff] [blame] | 115 | |
// One-time initialization of the allocator; implemented in scudo_allocator.cpp.
void initScudo();

// Entry points backing the C/C++ allocation interceptors. The AllocType
// parameter records which API family the call came through (see AllocType
// above); implementations live in scudo_allocator.cpp.
void *scudoMalloc(uptr Size, AllocType Type);
void scudoFree(void *Ptr, AllocType Type);
// Variant of free carrying the caller-provided size (e.g. sized delete).
void scudoSizedFree(void *Ptr, uptr Size, AllocType Type);
void *scudoRealloc(void *Ptr, uptr Size);
void *scudoCalloc(uptr NMemB, uptr Size);
void *scudoMemalign(uptr Alignment, uptr Size);
void *scudoValloc(uptr Size);
void *scudoPvalloc(uptr Size);
int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
void *scudoAlignedAlloc(uptr Alignment, uptr Size);
uptr scudoMallocUsableSize(void *Ptr);
| 129 | |
Kostya Kortchinsky | 1148dc5 | 2016-11-30 17:32:20 +0000 | [diff] [blame] | 130 | } // namespace __scudo |
Kostya Serebryany | 712fc98 | 2016-06-07 01:20:26 +0000 | [diff] [blame] | 131 | |
| 132 | #endif // SCUDO_ALLOCATOR_H_ |