//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
static const u64 kAddressSpaceSize = 1ULL << 39;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}
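
// Generic stress test for a size-class allocator: allocate chunks of many
// sizes through a local cache, check block boundaries and metadata, and
// verify that repeated allocate/free cycles keep TotalMemoryUsed() stable.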
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 4000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}
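
// The metadata pointer returned for a live chunk must stay stable across
// repeated GetMetaData() lookups.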
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
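
// Counts the OnMap()/OnUnmap() notifications issued by the allocators under
// test.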
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}
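
// Keeps allocating batches from a single size class until the allocator
// exhausts its region; the death test below expects the OOM check to fire.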
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}
#endif
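
// Exercises the combined (primary + secondary) allocator: overflowing size
// requests must return null while may_return_null is set, then abort once it
// is cleared, and a randomized allocate/free workload must round-trip chunk
// metadata correctly.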
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init(/* may_return_null */ true);

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  // Disable may_return_null; the same overflowing request must now abort.
  a->SetMayReturnNull(false);
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}
#endif
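
// Draining a local cache back into the allocator must not change the
// allocator's total memory usage from one iteration to the next.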
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, LargeAlloc) {
  void *p = InternalAlloc(10 << 20);
  InternalFree(p);
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}
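
// ForEachChunk() callback: records every reported chunk into a std::set so
// the iteration tests can verify that all live allocations are reported.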
void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

#if SANITIZER_CAN_USE_ALLOCATOR64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif

TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.TestOnlyInit();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}

typedef TwoLevelByteMap<1 << 12, 1 << 13, TestMapUnmapCallback> TestByteMap;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.TestOnlyInit();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

#endif  // #if !SANITIZER_DEBUG