//===-- asan_noinst_test.cc ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <unistd.h>  // for write(), used by ErrorReportCallbackOneToZ()
#include <algorithm>
#include <vector>
#include "gtest/gtest.h"

// Simple stand-alone pseudorandom number generator.
// Current algorithm is ANSI C linear congruential PRNG.
static inline u32 my_rand(u32* state) {
  return (*state = *state * 1103515245 + 12345) >> 16;
}

static u32 global_seed = 0;


TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

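// Stress the allocator with an interleaved mix of frees (one third of the
// iterations) and aligned allocations of varying size, touching the first,
// middle, and last byte of each new chunk.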
static void MallocStress(size_t n) {
  u32 seed = my_rand(&global_seed);
  __asan::AsanStackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::AsanStackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::AsanStackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1);
    } else {
      size_t size = my_rand(&seed) % 1000 + 1;
      switch ((my_rand(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size, &stack2);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3);
}


TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

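// Dump the shadow bytes around [ptr, ptr+size), marking the region
// boundaries with '.' and printing each shadow byte once as it changes.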
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

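// A pool of fake program counters used to build the stack traces that the
// compression tests and benchmark below operate on.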
static uptr pc_array[] = {
#if __WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // __WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

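// Round-trip test: compress a random prefix of the PC pool into a buffer of
// random capacity, uncompress it, and check that the recovered frames match
// the original prefix.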
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand(&global_seed);
  const size_t kNumPcs = ASAN_ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::AsanStackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
    size_t compress_size =
        std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
    size_t n_frames =
        __asan::AsanStackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::AsanStackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

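// Benchmark: repeatedly compress the same full-size stack trace; Ident()
// keeps the result from being optimized away.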
void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ASAN_ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::AsanStackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
        __asan::AsanStackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

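// Freed chunks are held in a quarantine before they can be reused, so a
// just-freed address should come back only after many other malloc/free
// cycles, yet well before the 1<<30 iteration cutoff.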
TEST(AddressSanitizer, QuarantineTest) {
  __asan::AsanStackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand(&global_seed);
  __asan::AsanStackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    pthread_create(&t, NULL, ThreadedQuarantineTestWorker, 0);
    pthread_join(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

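// Worker that repeatedly allocates and frees batches of same-size chunks,
// used below to stress concurrent malloc/free from several threads.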
void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::AsanStackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    pthread_create(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    pthread_join(t[i], 0);
  }
}

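// memset() into the shadow regions themselves must be reported as a wild
// access naming the region (low shadow, shadow gap, high shadow).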
TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + kPageSize), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*high shadow");
}

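// Even a zero-byte request is estimated to take at least one byte of
// storage; other sizes are returned unchanged here.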
TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
    "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator. The allocated size should
  // equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call __asan_get_allocated_size() on memory we didn't allocate,
  // or on interior pointers (not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_EQ(false, __asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_EQ(false, __asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for
  // __asan_get_allocated_size().
  EXPECT_EQ(false, __asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Once memory is freed, it is no longer owned, and calling
  // __asan_get_allocated_size() on it is forbidden.
  free(array);
  EXPECT_EQ(false, __asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);

  delete int_ptr;
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

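// Deliberately die with a double-free; several tests below use this as a
// controlled way to exit under EXPECT_DEATH/EXPECT_EXIT.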
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

// This test is run in a separate process, so that the large malloced chunk
// won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a chunk of this size in the free list.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should have fallen into the free list, and can be
  // allocated again without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with the expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
  static const size_t kNumOfChunks = 100;
  static const size_t kChunkSize = 100;
  char *chunks[kNumOfChunks];
  size_t i;
  size_t old_free_bytes, new_free_bytes;
  // Allocate one small chunk; the allocator likely has many such chunks
  // ready to fulfill future requests, so each subsequent allocation should
  // decrease the number of free bytes.
  chunks[0] = Ident((char*)malloc(kChunkSize));
  old_free_bytes = __asan_get_free_bytes();
  for (i = 1; i < kNumOfChunks; i++) {
    chunks[i] = Ident((char*)malloc(kChunkSize));
    new_free_bytes = __asan_get_free_bytes();
    EXPECT_LT(new_free_bytes, old_free_bytes);
    old_free_bytes = new_free_bytes;
  }
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads = (__WORDSIZE == 32) ? 40 : 200;

void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_create(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_join(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

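// Check the poison state of a single byte via the internal shadow lookup;
// used throughout the poisoning tests below.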
static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

#define GOOD_ACCESS(ptr, offset) \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison array[40..80).
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // Access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120).
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison the whole array - [0..120).
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96).
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20.
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

// Within each aligned block of size 2^granularity, a poisoned ("true") byte
// must not precede an addressable ("false") one; clear any poison that
// violates this, so that `shadow` matches what the real shadow encoding can
// represent.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

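// Exhaustively poison and unpoison every pair of subranges of a small array
// and compare the real shadow state against the `expected` emulation,
// normalized by MakeShadowValid().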
TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check the result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check the result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

static void ErrorReportCallbackOneToZ(const char *report) {
  int len = strlen(report);
  char *dup = (char*)malloc(len + 1);  // +1 for the NUL that strcpy() copies.
  strcpy(dup, report);
  for (int i = 0; i < len; i++) {
    if (dup[i] == '1') dup[i] = 'Z';
  }
  int written = write(2, dup, len);
  ASSERT_EQ(len, written);
  free(dup);
}

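// The callback rewrites every '1' in the report to 'Z', so the "size 1" of
// the reported access should surface as "size Z".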
TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1), "size Z");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs =
      (__WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
709}