//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "combined.h"

#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <thread>
#include <vector>

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android; without it, we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

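// Predicts whether a request will be served by the primary by mirroring the
// allocator's size computation. Illustrative arithmetic (hypothetical values,
// not exercised by the tests): on a 64-bit target MinAlignment is 16, so
// Size = 1000 at minimal alignment needs roundUpTo(1000, 16) = 1008 bytes
// plus the chunk header before the canAllocate() check.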
template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUpTo(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}

template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUpTo(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 0xaa;
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 0xaa;
        },
        "");
  }
}

template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->reset();
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }
};

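// gtest derives the name of each typed-test instantiation from
// testing::internal::GetTypeName; the specializations below give the configs
// readable names in the test output (otherwise they would appear as mangled
// or indexed type names, depending on RTTI availability).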
namespace testing {
namespace internal {
#define SCUDO_DEFINE_GTEST_TYPE_NAME(TYPE)                                     \
  template <> std::string GetTypeName<scudo::TYPE>() { return #TYPE; }
SCUDO_DEFINE_GTEST_TYPE_NAME(AndroidSvelteConfig)
#if SCUDO_FUCHSIA
SCUDO_DEFINE_GTEST_TYPE_NAME(FuchsiaConfig)
#else
SCUDO_DEFINE_GTEST_TYPE_NAME(DefaultConfig)
SCUDO_DEFINE_GTEST_TYPE_NAME(AndroidConfig)
#endif
#undef SCUDO_DEFINE_GTEST_TYPE_NAME
} // namespace internal
} // namespace testing

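// Test fixture: each test gets a fresh TestAllocator. The test harness's
// UseQuarantine flag is turned on only for AndroidConfig, the configuration
// exercised with a quarantine; the destructor restores it and returns freed
// memory to the OS so tests do not interfere with one another.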
template <class Config> struct ScudoCombinedTest : public ::testing::Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<Config, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS();
    UseQuarantine = true;
  }

  void BasicTest(scudo::uptr SizeLogMin, scudo::uptr SizeLogMax);

  using AllocatorT = TestAllocator<Config>;
  std::unique_ptr<AllocatorT> Allocator;
};

using ScudoCombinedTestTypes = testing::Types<scudo::AndroidSvelteConfig,
#if SCUDO_FUCHSIA
                                              scudo::FuchsiaConfig,
#else
                                              scudo::DefaultConfig,
                                              scudo::AndroidConfig
#endif
                                              >;
TYPED_TEST_CASE(ScudoCombinedTest, ScudoCombinedTestTypes);
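// Note: TYPED_TEST_CASE is the legacy gtest spelling of TYPED_TEST_SUITE;
// every TYPED_TEST below runs once per config in ScudoCombinedTestTypes.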

TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLogMin,
                                          scudo::uptr SizeLogMax) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger weird
  // behaviors (e.g., a power of two plus or minus a small delta).
  for (scudo::uptr SizeLog = SizeLogMin; SizeLog <= SizeLogMax; SizeLog++) {
    for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
      const scudo::uptr Align = 1U << AlignLog;
      for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
          continue;
        const scudo::uptr Size = (1U << SizeLog) + Delta;
        void *P = Allocator->allocate(Size, Origin, Align);
        EXPECT_NE(P, nullptr);
        EXPECT_TRUE(Allocator->isOwned(P));
        EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
        EXPECT_LE(Size, Allocator->getUsableSize(P));
        memset(P, 0xaa, Size);
        checkMemoryTaggingMaybe(Allocator, P, Size, Align);
        Allocator->deallocate(P, Origin, Size);
      }
    }
  }
}

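// The basic test is sharded across several TYPED_TESTs, presumably to keep
// the run time of any single instantiation bounded; together the shards
// cover size logs 0 through 20.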
TYPED_TEST(ScudoCombinedTest, BasicCombined0) { this->BasicTest(0, 16); }
TYPED_TEST(ScudoCombinedTest, BasicCombined1) { this->BasicTest(17, 18); }
TYPED_TEST(ScudoCombinedTest, BasicCombined2) { this->BasicTest(19, 19); }
TYPED_TEST(ScudoCombinedTest, BasicCombined3) { this->BasicTest(20, 20); }

TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zeroed out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that setting the ZeroFill option returns a zeroed out block even
  // when ZeroContents is not requested at the allocation call site.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will eventually end up being reused.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

TYPED_TEST(ScudoCombinedTest, ReallocateLarge) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

TYPED_TEST(ScudoCombinedTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default class size maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 0xab;
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize = ReallocSize + Delta;
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}

TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
  // are the ones we allocated. This requires the allocator to not have any
  // other allocated chunk at this point (e.g., it won't work with the
  // Quarantine).
  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  Allocator->disable();
  Allocator->iterateOverChunks(
      0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
      [](uintptr_t Base, size_t Size, void *Arg) {
        std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
        void *P = reinterpret_cast<void *>(Base);
        EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
      },
      reinterpret_cast<void *>(&V));
  Allocator->enable();
  for (auto P : V)
    Allocator->deallocate(P, Origin);
}

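// The use-after-free checks below rely on memory tagging: the allocator
// retags a block when it is freed, so a write through a stale pointer faults
// deterministically. Without MTE there is no such guarantee, hence the
// useMemoryTaggingTestOnly() gate.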
TYPED_TEST(ScudoCombinedTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 0xaa;
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
        },
        "");
  }
}

TYPED_TEST(ScudoCombinedTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
    scudo::disableMemoryTagChecksTestOnly();
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS();

    // Disabling memory tag checks may interfere with subsequent tests.
    // Re-enable them now.
    scudo::enableMemoryTagChecksTestOnly();
  }
}

TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allow us
  // to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

TYPED_TEST(ScudoCombinedTest, CacheDrain) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  EXPECT_TRUE(!TSD->Cache.isEmpty());
  TSD->Cache.drain();
  EXPECT_TRUE(TSD->Cache.isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

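// All worker threads block on the condition variable until the main thread
// flips Ready, so the 32 threads start allocating at roughly the same time
// and contend on the allocator's internal structures.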
TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = std::rand() % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
}

#if SCUDO_FUCHSIA
#define SKIP_ON_FUCHSIA(T) DISABLED_##T
#else
#define SKIP_ON_FUCHSIA(T) T
#endif

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::MapPlatformData Data = {};
  void *P = scudo::map(nullptr, Size, "testSEGV", MAP_NOACCESS, &Data);
  EXPECT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  scudo::unmap(P, Size, UNMAP_ALL, &Data);
}

struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u32 MaxNumCachedHint = 4;
  static const scudo::uptr MaxBytesCachedLog = 12;
};
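// With MinSizeLog == MidSizeLog == 10, MaxSizeLog == 13, and NumBits == 1,
// this map should yield exactly four power-of-two classes: 1024, 2048, 4096
// and 8192 bytes (matching the "four sizes" comment in DeathConfig below).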

static const scudo::uptr DeathRegionSizeLog = 20U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;

  // Tiny allocator; its Primary only serves chunks of four sizes.
  using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
  typedef scudo::SizeClassAllocator64<DeathConfig> Primary;
  static const scudo::uptr PrimaryRegionSizeLog = DeathRegionSizeLog;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;

  typedef scudo::MapAllocatorNoCache SecondaryCache;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
};

TEST(ScudoCombinedTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
TEST(ScudoCombinedTest, ReleaseToOS) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  Allocator->releaseToOS();
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
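// With DeathRegionSizeLog == 20, each class's region is 1 MiB. The test
// deliberately allocates more chunks of a class than its region can hold,
// and expects the excess to be transparently served by larger classes.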
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::SizeClassMap::LargestClassId; ClassId++) {
    const scudo::uptr Size =
        DeathConfig::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

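// On MTE-enabled devices, adjacent blocks of a size class are expected to
// carry memory tags of opposite parity, so that a linear buffer overflow
// always trips a tag mismatch. The test searches for two adjacent
// allocations and checks that parity invariant.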
TEST(ScudoCombinedTest, OddEven) {
  using AllocatorT = TestAllocator<scudo::AndroidConfig>;
  using SizeClassMap = AllocatorT::PrimaryT::SizeClassMap;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

TEST(ScudoCombinedTest, DisableMemInit) {
  using AllocatorT = TestAllocator<scudo::AndroidConfig>;
  using SizeClassMap = AllocatorT::PrimaryT::SizeClassMap;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> Ptrs(65536, nullptr);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that even when mem-init is disabled on a thread, calloc still works
  // as expected. This is tricky to ensure when MTE is enabled, so this test
  // tries to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], 0);
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}