//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_

#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif

// Secondary backed allocations are standalone chunks that contain extra
// information stored in a LargeChunk::Header prior to the frontend's header.
//
// The secondary takes care of alignment requirements (so that it can release
// unnecessary pages in the rare event of larger alignments), and as such must
// know about the frontend's header size.
//
// Since Windows doesn't support partial releasing of a reserved memory region,
// we have to keep track of both the reserved and the committed memory.
//
// The resulting chunk resembles the following:
//
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
//   | Unused space*      |
//   +--------------------+
//   | LargeChunk::Header |
//   +--------------------+
//   | {Unp,P}ackedHeader |
//   +--------------------+
//   | Data (aligned)     |
//   +--------------------+
//   | Unused space**     |
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
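//
// For illustration only (a sketch with hypothetical values, not part of the
// allocator): given the pointer returned by Allocate(), both headers sit at
// fixed negative offsets from it:
//
//   // Ptr is what Allocate() returned, i.e. UserBeg - Chunk::getHeaderSize().
//   LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
//   uptr UserBeg = reinterpret_cast<uptr>(Ptr) + Chunk::getHeaderSize();
//   uptr CommittedEnd = reinterpret_cast<uptr>(Ptr) + H->Size;
//
// H->Size covers everything from Ptr to the end of the committed region, and
// H->CommittedSize is the number of bytes actually mapped; both are consumed
// by Deallocate() and GetActuallyAllocatedSize() below.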

namespace LargeChunk {
struct Header {
  ReservedAddressRange StoredRange;
  uptr CommittedSize;
  uptr Size;
};
constexpr uptr getHeaderSize() {
  return RoundUpTo(sizeof(Header), MinAlignment);
}
static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
}
static Header *getHeader(const void *Ptr) {
  return getHeader(reinterpret_cast<uptr>(Ptr));
}
}  // namespace LargeChunk

class ScudoLargeMmapAllocator {
 public:
  void Init() {
    NumberOfAllocs = 0;
    NumberOfFrees = 0;
    AllocatedBytes = 0;
    FreedBytes = 0;
    LargestSize = 0;
  }

  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
    const uptr UserSize = Size - Chunk::getHeaderSize();
    // The Scudo frontend prevents us from allocating more than
    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
    uptr ReservedSize = Size + LargeChunk::getHeaderSize();
    if (UNLIKELY(Alignment > MinAlignment))
      ReservedSize += Alignment;
    const uptr PageSize = GetPageSizeCached();
    ReservedSize = RoundUpTo(ReservedSize, PageSize);
    // Account for 2 guard pages, one before and one after the chunk.
    ReservedSize += 2 * PageSize;
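    // Numerical sketch, for illustration only (hypothetical values): with
    // 4096-byte pages, LargeChunk::getHeaderSize() == 48, a default-aligned
    // request and Size == 20000:
    //   ReservedSize = RoundUpTo(20000 + 48, 4096) = 20480  (5 pages)
    //   ReservedSize += 2 * 4096                   = 28672  (7 pages,
    //                                                 including both guards)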

    ReservedAddressRange AddressRange;
    uptr ReservedBeg = AddressRange.Init(ReservedSize);
    if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
      return ReturnNullOrDieOnFailure::OnOOM();
    // A page-aligned pointer is assumed after that, so check it now.
    DCHECK(IsAligned(ReservedBeg, PageSize));
    uptr ReservedEnd = ReservedBeg + ReservedSize;
    // The beginning of the user area for that allocation comes after the
    // initial guard page, and both headers. This is the pointer that has to
    // abide by alignment requirements.
    uptr CommittedBeg = ReservedBeg + PageSize;
    uptr UserBeg = CommittedBeg + HeadersSize;
    uptr UserEnd = UserBeg + UserSize;
    uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);

    // In the rare event of larger alignments, we will attempt to fit the mmap
    // area better and unmap extraneous memory. This will also ensure that the
    // offset and unused bytes fields of the header stay small.
    if (UNLIKELY(Alignment > MinAlignment)) {
      if (!IsAligned(UserBeg, Alignment)) {
        UserBeg = RoundUpTo(UserBeg, Alignment);
        CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
        const uptr NewReservedBeg = CommittedBeg - PageSize;
        DCHECK_GE(NewReservedBeg, ReservedBeg);
        if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
          AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
          ReservedBeg = NewReservedBeg;
        }
        UserEnd = UserBeg + UserSize;
        CommittedEnd = RoundUpTo(UserEnd, PageSize);
      }
      const uptr NewReservedEnd = CommittedEnd + PageSize;
      DCHECK_LE(NewReservedEnd, ReservedEnd);
      if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
        AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
        ReservedEnd = NewReservedEnd;
      }
    }
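    // Trimming sketch, for illustration only (hypothetical values): with
    // 4096-byte pages, HeadersSize == 64 and Alignment == 16384, the
    // misaligned case above amounts to:
    //   UserBeg        = RoundUpTo(UserBeg, 16384)
    //   CommittedBeg   = RoundDownTo(UserBeg - 64, 4096)
    //   NewReservedBeg = CommittedBeg - 4096
    // and, except on Windows (which can't partially release a reservation),
    // the pages between ReservedBeg and NewReservedBeg are unmapped; the tail
    // past CommittedEnd + PageSize is trimmed the same way.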

    DCHECK_LE(UserEnd, CommittedEnd);
    const uptr CommittedSize = CommittedEnd - CommittedBeg;
    // Actually mmap the memory, preserving the guard pages on either side.
    CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
    const uptr Ptr = UserBeg - Chunk::getHeaderSize();
    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
    H->StoredRange = AddressRange;
    H->Size = CommittedEnd - Ptr;
    H->CommittedSize = CommittedSize;

    // The primary adds the whole class size to the stats when allocating a
    // chunk, so we will do something similar here. But we will not account for
    // the guard pages.
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Add(AllocatorStatAllocated, CommittedSize);
      Stats->Add(AllocatorStatMapped, CommittedSize);
      AllocatedBytes += CommittedSize;
      if (LargestSize < CommittedSize)
        LargestSize = CommittedSize;
      NumberOfAllocs++;
    }

    return reinterpret_cast<void *>(Ptr);
  }

  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
    // Since the ReservedAddressRange is stored inside the mapping we're about
    // to unmap, copy it onto the stack first.
    ReservedAddressRange AddressRange = H->StoredRange;
    const uptr Size = H->CommittedSize;
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Sub(AllocatorStatAllocated, Size);
      Stats->Sub(AllocatorStatMapped, Size);
      FreedBytes += Size;
      NumberOfFrees++;
    }
    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
                       AddressRange.size());
  }

  static uptr GetActuallyAllocatedSize(void *Ptr) {
    return LargeChunk::getHeader(Ptr)->Size;
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
           "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
           NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
           FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
           (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
  }

 private:
  static constexpr uptr HeadersSize =
      LargeChunk::getHeaderSize() + Chunk::getHeaderSize();

  SpinMutex StatsMutex;
  u32 NumberOfAllocs;
  u32 NumberOfFrees;
  uptr AllocatedBytes;
  uptr FreedBytes;
  uptr LargestSize;
};

#endif  // SCUDO_ALLOCATOR_SECONDARY_H_