//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

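// Shared harness for the size-class allocators: allocates chunks of assorted
// sizes through a local cache, checks block boundaries, ownership and
// metadata round-trips, then frees everything and verifies that the total
// memory used by the allocator stays stable across iterations.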
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

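// Stresses GetMetaData(): records the metadata pointer of each allocation and
// checks that repeated lookups keep returning the same address.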
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

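// Allocates roughly 8GB worth of the largest size class and checks that
// GetBlockBegin() keeps mapping each chunk back to its own start (see the
// overflow note inside the loop below).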
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

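// Counts map/unmap events; the tests below use it to check how many system
// mappings each allocator performs.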
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

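// Keeps allocating batches from one size class until the address space
// reserved for the allocator runs out; the caller below expects this to die
// with an out-of-memory message.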
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}
#endif

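// Exercises the combined (primary + secondary) allocator: overflow-sized
// requests must return null while may_return_null is true, then a large mix
// of small and large allocations is pushed through a cache and released.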
410template
411<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
412void TestCombinedAllocator() {
Kostya Serebryany674d05c2012-12-06 14:27:32 +0000413 typedef
414 CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
415 Allocator;
416 Allocator *a = new Allocator;
Stephen Hines86277eb2015-03-23 12:06:32 -0700417 a->Init(/* may_return_null */ true);
Kostya Serebryany72166ca2012-12-05 10:09:15 +0000418
419 AllocatorCache cache;
Dmitry Vyukovbdd844c2013-01-24 09:08:03 +0000420 memset(&cache, 0, sizeof(cache));
421 a->InitCache(&cache);
Kostya Serebryany72166ca2012-12-05 10:09:15 +0000422
Kostya Serebryany674d05c2012-12-06 14:27:32 +0000423 EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
424 EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
425 EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
426 EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
427 EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
Kostya Serebryany72166ca2012-12-05 10:09:15 +0000428
  // With may_return_null set to false, the same impossible request must abort.
  a->SetMayReturnNull(false);
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}
#endif

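// Checks that a local cache drains back to the allocator: after repeated
// allocate/deallocate/Drain cycles the total memory used must not grow.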
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

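// Worker for AllocatorLeakTest below: allocates once through the file-scope
// cache and drains it back, so repeatedly spawning threads should not
// increase the allocator's memory use.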
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads. The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread. Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized. We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, LargeAlloc) {
  void *p = InternalAlloc(10 << 20);
  InternalFree(p);
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

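// Allocates a batch of chunks, then walks the allocator with ForEachChunk()
// under ForceLock() and checks that every live chunk is reported.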
653template <class Allocator>
654void TestSizeClassAllocatorIteration() {
655 Allocator *a = new Allocator;
656 a->Init();
657 SizeClassAllocatorLocalCache<Allocator> cache;
658 memset(&cache, 0, sizeof(cache));
659 cache.Init(0);
660
661 static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
662 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
663
664 std::vector<void *> allocated;
665
666 // Allocate a bunch of chunks.
667 for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
668 uptr size = sizes[s];
669 if (!a->CanAllocate(size, 1)) continue;
670 // printf("s = %ld\n", size);
671 uptr n_iter = std::max((uptr)6, 80000 / size);
672 // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
673 for (uptr j = 0; j < n_iter; j++) {
674 uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
675 void *x = cache.Allocate(a, class_id0);
676 allocated.push_back(x);
677 }
678 }
679
Sergey Matveevac78d002013-06-24 08:34:50 +0000680 std::set<uptr> reported_chunks;
Kostya Serebryany300f9532013-03-15 11:39:41 +0000681 a->ForceLock();
Sergey Matveevac78d002013-06-24 08:34:50 +0000682 a->ForEachChunk(IterationTestCallback, &reported_chunks);
Kostya Serebryany300f9532013-03-15 11:39:41 +0000683 a->ForceUnlock();
684
685 for (uptr i = 0; i < allocated.size(); i++) {
686 // Don't use EXPECT_NE. Reporting the first mismatch is enough.
Sergey Matveevac78d002013-06-24 08:34:50 +0000687 ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
688 reported_chunks.end());
Kostya Serebryany300f9532013-03-15 11:39:41 +0000689 }
690
691 a->TestOnlyUnmap();
692 delete a;
693}
694
Stephen Hines86277eb2015-03-23 12:06:32 -0700695#if SANITIZER_CAN_USE_ALLOCATOR64
Kostya Serebryany300f9532013-03-15 11:39:41 +0000696TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
697 TestSizeClassAllocatorIteration<Allocator64>();
698}
699#endif
700
701TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
702 TestSizeClassAllocatorIteration<Allocator32Compact>();
703}
704
Kostya Serebryany300f9532013-03-15 11:39:41 +0000705TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
706 LargeMmapAllocator<> a;
Stephen Hines86277eb2015-03-23 12:06:32 -0700707 a.Init(/* may_return_null */ false);
Kostya Serebryany300f9532013-03-15 11:39:41 +0000708 AllocatorStats stats;
709 stats.Init();
710
Kostya Serebryany6c876e42013-03-15 12:27:52 +0000711 static const uptr kNumAllocs = 1000;
Kostya Serebryany300f9532013-03-15 11:39:41 +0000712 char *allocated[kNumAllocs];
713 static const uptr size = 40;
714 // Allocate some.
Kostya Serebryanyf8c3f3d2013-05-30 08:43:30 +0000715 for (uptr i = 0; i < kNumAllocs; i++)
Kostya Serebryany300f9532013-03-15 11:39:41 +0000716 allocated[i] = (char *)a.Allocate(&stats, size, 1);
Kostya Serebryany300f9532013-03-15 11:39:41 +0000717
Sergey Matveevac78d002013-06-24 08:34:50 +0000718 std::set<uptr> reported_chunks;
Kostya Serebryany300f9532013-03-15 11:39:41 +0000719 a.ForceLock();
Sergey Matveevac78d002013-06-24 08:34:50 +0000720 a.ForEachChunk(IterationTestCallback, &reported_chunks);
Kostya Serebryany300f9532013-03-15 11:39:41 +0000721 a.ForceUnlock();
722
723 for (uptr i = 0; i < kNumAllocs; i++) {
724 // Don't use EXPECT_NE. Reporting the first mismatch is enough.
Sergey Matveevac78d002013-06-24 08:34:50 +0000725 ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
726 reported_chunks.end());
Kostya Serebryany300f9532013-03-15 11:39:41 +0000727 }
Kostya Serebryanyf8c3f3d2013-05-30 08:43:30 +0000728 for (uptr i = 0; i < kNumAllocs; i++)
729 a.Deallocate(&stats, allocated[i]);
Kostya Serebryany300f9532013-03-15 11:39:41 +0000730}
731
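// Checks GetBlockBeginFastLocked(): pointers in or near an allocated block
// must resolve to its beginning, while addresses outside any block must
// resolve to null.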
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}


#if SANITIZER_CAN_USE_ALLOCATOR64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif

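// Basic TwoLevelByteMap coverage: values written at sparse indices must read
// back, and untouched indices must stay zero.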
TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.TestOnlyInit();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}

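// Threaded TwoLevelByteMap test: several threads write disjoint shards and
// the map/unmap callback counts are checked against the number of
// first-level entries.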
typedef TwoLevelByteMap<1 << 12, 1 << 13, TestMapUnmapCallback> TestByteMap;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.TestOnlyInit();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

#endif  // #if !SANITIZER_DEBUG