//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_

#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif

// Secondary backed allocations are standalone chunks that contain extra
// information stored in a LargeChunk::Header prior to the frontend's header.
//
// The secondary takes care of alignment requirements (so that it can release
// unnecessary pages in the rare event of larger alignments), and as such must
// know about the frontend's header size.
//
// Since Windows doesn't support partial releasing of a reserved memory region,
// we have to keep track of both the reserved and the committed memory.
//
// The resulting chunk resembles the following:
//
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
//   | Unused space*      |
//   +--------------------+
//   | LargeChunk::Header |
//   +--------------------+
//   | {Unp,P}ackedHeader |
//   +--------------------+
//   | Data (aligned)     |
//   +--------------------+
//   | Unused space**     |
//   +--------------------+
//   | Guard page(s)      |
//   +--------------------+
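//
// The unused space before the headers (*) only appears for alignments larger
// than MinAlignment; the unused space after the data (**) is the remainder of
// the last committed page.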

namespace LargeChunk {
  struct Header {
    ReservedAddressRange StoredRange;
    uptr CommittedSize;
    uptr Size;
  };
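  // The header size is rounded up to MinAlignment so that what follows it
  // (the frontend's header and the user data) stays suitably aligned.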
  constexpr uptr getHeaderSize() {
    return RoundUpTo(sizeof(Header), MinAlignment);
  }
  static Header *getHeader(uptr Ptr) {
    return reinterpret_cast<Header *>(Ptr - getHeaderSize());
  }
  static Header *getHeader(const void *Ptr) {
    return getHeader(reinterpret_cast<uptr>(Ptr));
  }
}  // namespace LargeChunk

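// A rough usage sketch (the Stats and NeededSize variables below are
// assumptions for illustration, not part of this file):
//
//   ScudoLargeMmapAllocator Secondary;
//   Secondary.Init();
//   // The size passed in already includes the frontend's Chunk header.
//   void *Ptr = Secondary.Allocate(&Stats, NeededSize, MinAlignment);
//   ...
//   Secondary.Deallocate(&Stats, Ptr);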
class ScudoLargeMmapAllocator {
 public:
  void Init() {
    NumberOfAllocs = 0;
    NumberOfFrees = 0;
    AllocatedBytes = 0;
    FreedBytes = 0;
    LargestSize = 0;
  }

  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
    const uptr UserSize = Size - Chunk::getHeaderSize();
    // The Scudo frontend prevents us from allocating more than
    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
    uptr ReservedSize = Size + LargeChunk::getHeaderSize();
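    // For alignments larger than MinAlignment, reserve an extra Alignment's
    // worth of bytes so that the user pointer can be rounded up to the
    // requested alignment and still fit within the mapping.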
    if (UNLIKELY(Alignment > MinAlignment))
      ReservedSize += Alignment;
    const uptr PageSize = GetPageSizeCached();
    ReservedSize = RoundUpTo(ReservedSize, PageSize);
    // Account for 2 guard pages, one before and one after the chunk.
    ReservedSize += 2 * PageSize;

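    // Reserve the whole range up front; only the pages actually backing the
    // chunk are committed further down, once the final layout is known.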
    ReservedAddressRange AddressRange;
    uptr ReservedBeg = AddressRange.Init(ReservedSize);
    if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
      return ReturnNullOrDieOnFailure::OnOOM();
    // The rest of the code assumes a page-aligned pointer, so verify it now.
    DCHECK(IsAligned(ReservedBeg, PageSize));
    uptr ReservedEnd = ReservedBeg + ReservedSize;
    // The beginning of the user area for that allocation comes after the
    // initial guard page, and both headers. This is the pointer that has to
    // abide by alignment requirements.
    uptr CommittedBeg = ReservedBeg + PageSize;
    uptr UserBeg = CommittedBeg + HeadersSize;
    uptr UserEnd = UserBeg + UserSize;
    uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);

    // In the rare event of larger alignments, we will attempt to fit the mmap
    // area better and unmap extraneous memory. This will also ensure that the
    // offset and unused bytes field of the header stay small.
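    // Windows cannot partially release a reserved region, so the extraneous
    // memory is only returned on the other platforms.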
    if (UNLIKELY(Alignment > MinAlignment)) {
      if (!IsAligned(UserBeg, Alignment)) {
        UserBeg = RoundUpTo(UserBeg, Alignment);
        CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
        const uptr NewReservedBeg = CommittedBeg - PageSize;
        DCHECK_GE(NewReservedBeg, ReservedBeg);
        if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
          AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
          ReservedBeg = NewReservedBeg;
        }
        UserEnd = UserBeg + UserSize;
        CommittedEnd = RoundUpTo(UserEnd, PageSize);
      }
      const uptr NewReservedEnd = CommittedEnd + PageSize;
      DCHECK_LE(NewReservedEnd, ReservedEnd);
      if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
        AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
        ReservedEnd = NewReservedEnd;
      }
    }

    DCHECK_LE(UserEnd, CommittedEnd);
    const uptr CommittedSize = CommittedEnd - CommittedBeg;
    // Actually mmap the memory, preserving the guard pages on either side.
    CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
    const uptr Ptr = UserBeg - Chunk::getHeaderSize();
    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
    H->StoredRange = AddressRange;
    H->Size = CommittedEnd - Ptr;
    H->CommittedSize = CommittedSize;
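    // Size (from Ptr to the end of the committed region) is what
    // GetActuallyAllocatedSize reports; CommittedSize is what gets credited
    // back to the statistics on deallocation.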

    // The primary adds the whole class size to the stats when allocating a
    // chunk, so we do something similar here, but without accounting for the
    // guard pages.
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Add(AllocatorStatAllocated, CommittedSize);
      Stats->Add(AllocatorStatMapped, CommittedSize);
      AllocatedBytes += CommittedSize;
      if (LargestSize < CommittedSize)
        LargestSize = CommittedSize;
      NumberOfAllocs++;
    }

    return reinterpret_cast<void *>(Ptr);
  }

  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
    // The ReservedAddressRange is stored within the region we are about to
    // unmap, so copy it onto the stack first.
    ReservedAddressRange AddressRange = H->StoredRange;
    const uptr Size = H->CommittedSize;
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Sub(AllocatorStatAllocated, Size);
      Stats->Sub(AllocatorStatMapped, Size);
      FreedBytes += Size;
      NumberOfFrees++;
    }
    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
                       AddressRange.size());
  }

  static uptr GetActuallyAllocatedSize(void *Ptr) {
    return LargeChunk::getHeader(Ptr)->Size;
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
           "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
           NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
           FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
           (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
  }

 private:
  static constexpr uptr HeadersSize =
      LargeChunk::getHeaderSize() + Chunk::getHeaderSize();

  SpinMutex StatsMutex;
  u32 NumberOfAllocs;
  u32 NumberOfFrees;
  uptr AllocatedBytes;
  uptr FreedBytes;
  uptr LargestSize;
};

#endif  // SCUDO_ALLOCATOR_SECONDARY_H_