//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "combined.h"

#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <thread>
#include <vector>

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android; without it we can end up
  // spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

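// Predicts whether an allocation of the given size and alignment will be
// serviced by the Primary, by mirroring the size computation performed by the
// combined allocator. Used below to decide which fill and tagging behaviors
// to expect for a given chunk.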
template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUpTo(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}

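// Accessing one byte before the chunk should fault whenever memory tagging is
// enabled. Accessing one byte past the (rounded-up) size should also fault,
// but only in the configurations where the allocator is expected to detect
// it; see the condition below.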
template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUpTo(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 0xaa;
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 0xaa;
        },
        "");
  }
}

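// Wraps scudo::Allocator so each test gets a fully initialized instance whose
// mappings are released in the destructor. Memory tagging is disabled up
// front when the hardware supports it but the system cannot report tag
// faults, since the EXPECT_DEATH checks would not trigger otherwise.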
template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->reset();
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }
};

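// Provide human-readable names for the configurations used by the typed test
// suite below, so that instantiations show up as e.g. "AndroidConfig" rather
// than a mangled type name.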
namespace testing {
namespace internal {
#define SCUDO_DEFINE_GTEST_TYPE_NAME(TYPE)                                     \
  template <> std::string GetTypeName<scudo::TYPE>() { return #TYPE; }
SCUDO_DEFINE_GTEST_TYPE_NAME(AndroidSvelteConfig)
#if SCUDO_FUCHSIA
SCUDO_DEFINE_GTEST_TYPE_NAME(FuchsiaConfig)
#else
SCUDO_DEFINE_GTEST_TYPE_NAME(DefaultConfig)
SCUDO_DEFINE_GTEST_TYPE_NAME(AndroidConfig)
#endif
#undef SCUDO_DEFINE_GTEST_TYPE_NAME
} // namespace internal
} // namespace testing

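// Typed test fixture: each test owns a fresh TestAllocator for the
// configuration under test. The Quarantine is only enabled for AndroidConfig;
// UseQuarantine is restored and memory released back to the OS when the
// fixture is destroyed.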
template <class Config> struct ScudoCombinedTest : public ::testing::Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<Config, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS();
    UseQuarantine = true;
  }

  void BasicTest(scudo::uptr SizeLogMin, scudo::uptr SizeLogMax);

  using AllocatorT = TestAllocator<Config>;
  std::unique_ptr<AllocatorT> Allocator;
};

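// Instantiate the typed tests for every configuration available on the
// current platform.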
using ScudoCombinedTestTypes = testing::Types<scudo::AndroidSvelteConfig,
#if SCUDO_FUCHSIA
                                              scudo::FuchsiaConfig,
#else
                                              scudo::DefaultConfig,
                                              scudo::AndroidConfig
#endif
                                              >;
TYPED_TEST_CASE(ScudoCombinedTest, ScudoCombinedTestTypes);

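// isOwned() must return false (and must not crash) for pointers the allocator
// never returned, such as static or stack buffers, and must not modify them.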
TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLogMin,
                                          scudo::uptr SizeLogMax) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger corner
  // cases (e.g., a power of two plus or minus a small delta).
  for (scudo::uptr SizeLog = SizeLogMin; SizeLog <= SizeLogMax; SizeLog++) {
    for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
      const scudo::uptr Align = 1U << AlignLog;
      for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
          continue;
        const scudo::uptr Size = (1U << SizeLog) + Delta;
        void *P = Allocator->allocate(Size, Origin, Align);
        EXPECT_NE(P, nullptr);
        EXPECT_TRUE(Allocator->isOwned(P));
        EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
        EXPECT_LE(Size, Allocator->getUsableSize(P));
        memset(P, 0xaa, Size);
        checkMemoryTaggingMaybe(Allocator, P, Size, Align);
        Allocator->deallocate(P, Origin, Size);
      }
    }
  }
}

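// Exercise BasicTest over the full range of size class logarithms, split
// across several TYPED_TESTs to keep individual test runtimes manageable.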
TYPED_TEST(ScudoCombinedTest, BasicCombined0) { this->BasicTest(0, 16); }
TYPED_TEST(ScudoCombinedTest, BasicCombined1) { this->BasicTest(17, 18); }
TYPED_TEST(ScudoCombinedTest, BasicCombined2) { this->BasicTest(19, 19); }
TYPED_TEST(ScudoCombinedTest, BasicCombined3) { this->BasicTest(20, 20); }

TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zero'd out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zero'd out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], 0);
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

TYPED_TEST(ScudoCombinedTest, ReallocateLarge) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 0xab;
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

TYPED_TEST(ScudoCombinedTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default size class maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 0xab;
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize = ReallocSize + Delta;
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}

TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
  // are the ones we allocated. This requires the allocator not to have any
  // other allocated chunk at this point (e.g., it won't work with the
  // Quarantine enabled).
  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  Allocator->disable();
  Allocator->iterateOverChunks(
      0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
      [](uintptr_t Base, size_t Size, void *Arg) {
        std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
        void *P = reinterpret_cast<void *>(Base);
        EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
      },
      reinterpret_cast<void *>(&V));
  Allocator->enable();
  for (auto P : V)
    Allocator->deallocate(P, Origin);
}

TYPED_TEST(ScudoCombinedTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 0xaa;
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 0xaa;
        },
        "");
  }
}

TYPED_TEST(ScudoCombinedTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
    scudo::disableMemoryTagChecksTestOnly();
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 0xaa;
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS();

    // Disabling memory tag checks may interfere with subsequent tests.
    // Re-enable them now.
    scudo::enableMemoryTagChecksTestOnly();
  }
}

TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allows
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

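// Draining the thread-local cache should return all cached blocks to the
// Primary and leave the cache empty.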
TYPED_TEST(ScudoCombinedTest, CacheDrain) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  EXPECT_TRUE(!TSD->Cache.isEmpty());
  TSD->Cache.drain();
  EXPECT_TRUE(TSD->Cache.isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

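// Exercise the allocator from 32 threads concurrently, with random sizes, to
// check the thread safety of the combined allocator and its TSD registry.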
TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = std::rand() % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }
      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS();
}

#if SCUDO_FUCHSIA
#define SKIP_ON_FUCHSIA(T) DISABLED_##T
#else
#define SKIP_ON_FUCHSIA(T) T
#endif

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::MapPlatformData Data = {};
  void *P = scudo::map(nullptr, Size, "testSEGV", MAP_NOACCESS, &Data);
  EXPECT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  scudo::unmap(P, Size, UNMAP_ALL, &Data);
}

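// A tiny size class configuration and region size used by the DeathConfig
// tests below, so that conditions such as region exhaustion (see FullRegion)
// are cheap to trigger.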
struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u32 MaxNumCachedHint = 4;
  static const scudo::uptr MaxBytesCachedLog = 12;
};

static const scudo::uptr DeathRegionSizeLog = 20U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;

  // Tiny allocator whose Primary only serves chunks of four sizes.
  using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
  typedef scudo::SizeClassAllocator64<DeathConfig> Primary;
  static const scudo::uptr PrimaryRegionSizeLog = DeathRegionSizeLog;
  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
  typedef scudo::uptr PrimaryCompactPtrT;
  static const scudo::uptr PrimaryCompactPtrScale = 0;

  typedef scudo::MapAllocatorNoCache SecondaryCache;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
};

TEST(ScudoCombinedTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
TEST(ScudoCombinedTest, ReleaseToOS) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  Allocator->releaseToOS();
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::SizeClassMap::LargestClassId; ClassId++) {
    const scudo::uptr Size =
        DeathConfig::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

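// With AndroidConfig and memory tagging enabled, neighboring chunks in the
// Primary are expected to carry tags of opposite parity, so that a linear
// overflow into the adjacent chunk is always caught. Allocate until two
// neighboring chunks are observed and compare their tags.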
TEST(ScudoCombinedTest, OddEven) {
  using AllocatorT = TestAllocator<scudo::AndroidConfig>;
  using SizeClassMap = AllocatorT::PrimaryT::SizeClassMap;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

TEST(ScudoCombinedTest, DisableMemInit) {
  using AllocatorT = TestAllocator<scudo::AndroidConfig>;
  using SizeClassMap = AllocatorT::PrimaryT::SizeClassMap;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> Ptrs(65536, nullptr);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that even if mem-init is disabled on a thread, calloc still works as
  // expected. This is tricky to ensure when MTE is enabled, so this test tries
  // to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], 0);
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}