//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "combined.h"

#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <thread>
#include <vector>

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android; without this we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

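// Mirrors the size computation the combined allocator performs when routing a
// request, so that tests can predict whether a given (Size, Alignment) pair
// will be served by the Primary or by the Secondary.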
template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUpTo(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}

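// Checks that out-of-bounds accesses trap when they are expected to: one byte
// before the chunk whenever memory tagging is live, and one byte past the end
// only in the configurations where that byte is known to be protected.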
template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUpTo(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 0xaa;
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 0xaa;
        },
        "");
  }
}

template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->reset();
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

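  // The allocator object can require more alignment than the default operator
  // new guarantees, so back its storage with posix_memalign.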
  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

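// Fixture instantiated once per allocator configuration. Quarantine is only
// exercised with the Android config (the only one that enables it here), and
// the flag is restored on teardown so later tests are unaffected.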
template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS();
    UseQuarantine = true;
  }

  void RunTest();

  void BasicTest(scudo::uptr SizeLogMin, scudo::uptr SizeLogMax);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};

#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                    \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidSvelteConfig)                    \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
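
// As an illustration, on non-Fuchsia platforms
// SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) expands to one fixture plus one
// TEST_F per supported config, roughly:
//   using ScudoCombinedTestIsOwned_DefaultConfig =
//       ScudoCombinedTestIsOwned<scudo::DefaultConfig>;
//   TEST_F(ScudoCombinedTestIsOwned_DefaultConfig, IsOwned) { Run(); }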

SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

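  // isOwned() must not modify the memory it inspects either; fill a stack
  // buffer with a pattern and verify it is intact after the call.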
  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLogMin,
                                          scudo::uptr SizeLogMax) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, focusing on sizes that could trigger weird behaviors
  // (e.g., a power of two plus or minus a small delta).
  for (scudo::uptr SizeLog = SizeLogMin; SizeLog <= SizeLogMax; SizeLog++) {
    for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
      const scudo::uptr Align = 1U << AlignLog;
      for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
          continue;
        const scudo::uptr Size = (1U << SizeLog) + Delta;
        void *P = Allocator->allocate(Size, Origin, Align);
        EXPECT_NE(P, nullptr);
        EXPECT_TRUE(Allocator->isOwned(P));
        EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
        EXPECT_LE(Size, Allocator->getUsableSize(P));
        memset(P, 0xaa, Size);
        checkMemoryTaggingMaybe(Allocator, P, Size, Align);
        Allocator->deallocate(P, Origin, Size);
      }
    }
  }
}

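// The basic test is split across several size-log ranges, presumably to keep
// each individual test instantiation comfortably within the harness timeout.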
SCUDO_TYPED_TEST(ScudoCombinedTest, BasicCombined0) { this->BasicTest(0, 16); }
SCUDO_TYPED_TEST(ScudoCombinedTest, BasicCombined1) { this->BasicTest(17, 18); }
SCUDO_TYPED_TEST(ScudoCombinedTest, BasicCombined2) { this->BasicTest(19, 19); }
SCUDO_TYPED_TEST(ScudoCombinedTest, BasicCombined3) { this->BasicTest(20, 20); }

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zero'd out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zero'd out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying that
  // we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default class size maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 0xab;
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize = ReallocSize + Delta;
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocates a bunch of chunks, then iterates over all the chunks, ensuring
  // they are the ones we allocated. This requires the allocator to not have
  // any other allocated chunk at this point (e.g. it won't work with the
  // Quarantine).
  // FIXME: Make it work with UseQuarantine and tagging enabled. The internals
  // of iterateOverChunks read the header through both tagged and untagged
  // pointers, so one of them will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 0xaa;
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
    if (scudo::disableMemoryTagChecksTestOnly()) {
      Allocator->disableMemoryTagging();
      reinterpret_cast<char *>(P)[2048] = 0xaa;
      Allocator->deallocate(P, Origin);

      P = Allocator->allocate(2048, Origin);
      EXPECT_EQ(scudo::untagPointer(P), P);
      reinterpret_cast<char *>(P)[2048] = 0xaa;
      Allocator->deallocate(P, Origin);

      // Disabling memory tag checks may interfere with subsequent tests.
      // Re-enable them now.
      scudo::enableMemoryTagChecksTestOnly();
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

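  // getStats() reports the size the full output needs, so grow the buffer
  // until everything fits.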
  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allow us
  // to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  EXPECT_TRUE(!TSD->Cache.isEmpty());
  TSD->Cache.drain();
  EXPECT_TRUE(TSD->Cache.isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
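  // Hammer the allocator from 32 threads at once; the condition variable
  // releases them together so that they actually contend with each other.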
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = std::rand() % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
}

#if SCUDO_FUCHSIA
#define SKIP_ON_FUCHSIA(T) DISABLED_##T
#else
#define SKIP_ON_FUCHSIA(T) T
#endif

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::MapPlatformData Data = {};
  void *P = scudo::map(nullptr, Size, "testSEGV", MAP_NOACCESS, &Data);
  EXPECT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  scudo::unmap(P, Size, UNMAP_ALL, &Data);
}

struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u32 MaxNumCachedHint = 4;
  static const scudo::uptr MaxBytesCachedLog = 12;
};
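
// With NumBits == 1 and MinSizeLog == MidSizeLog, this map should yield
// exactly four size classes (1024, 2048, 4096 and 8192 bytes), matching the
// "four sizes" mentioned in the DeathConfig comment below.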

static const scudo::uptr DeathRegionSizeLog = 20U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;

  // Tiny allocator whose Primary only serves chunks of four sizes.
  using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
  typedef scudo::SizeClassAllocator64<DeathConfig> Primary;
  static const scudo::uptr PrimaryRegionSizeLog = DeathRegionSizeLog;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;

  typedef scudo::MapAllocatorNoCache SecondaryCache;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
};

TEST(ScudoCombinedTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
TEST(ScudoCombinedTest, ReleaseToOS) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  Allocator->releaseToOS();
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::SizeClassMap::LargestClassId; ClassId++) {
    const scudo::uptr Size =
        DeathConfig::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

TEST(ScudoCombinedTest, OddEven) {
  using AllocatorT = TestAllocator<scudo::AndroidConfig>;
  using SizeClassMap = AllocatorT::PrimaryT::SizeClassMap;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

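  // Chunks that are adjacent in memory within a size class are expected to
  // carry memory tags of opposite parity (the odd/even tag scheme), so that a
  // linear overflow into a neighboring chunk always changes the tag.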
  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

TEST(ScudoCombinedTest, DisableMemInit) {
  using AllocatorT = TestAllocator<scudo::AndroidConfig>;
  using SizeClassMap = AllocatorT::PrimaryT::SizeClassMap;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> Ptrs(65536, nullptr);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that if mem-init is disabled on a thread, calloc should still work as
  // expected. This is tricky to ensure when MTE is enabled, so this test tries
  // to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], 0);
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}