//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

namespace __tsan {

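// The global allocator is placed into a static 64-byte-aligned buffer instead
// of being a regular global object, so it is usable before C++ global
// constructors run; allocator() just casts the placeholder to Allocator.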
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

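// Initializes the allocator in the placeholder above. Presumably called once
// at runtime startup, before the first user allocation.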
void InitializeAllocator() {
  allocator()->Init();
}

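// Returns the dying thread's allocator cache to the global allocator so the
// cached chunks are not lost when the thread exits.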
void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->SwallowCache(&thr->alloc_cache);
}

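// If the current allocator call happens inside a signal handler, report it:
// malloc/free are not async-signal-safe. Gated by the report_signal_unsafe
// flag.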
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(&stack);
  OutputReport(rep, rep.GetReport()->stacks[0]);
}

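// Allocation path used by the malloc/new interceptors: grab memory from the
// thread-local cache, fill in the block metadata (size, allocating thread,
// allocation stack recorded in the stack depot) and imitate a write to the
// whole range so the race detector treats the allocation as an access.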
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  CHECK_GT(thr->in_rtl, 0);
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  b->size = sz;
  b->alloc_tid = thr->unique_id;
  b->alloc_stack_id = 0;
  if (thr->shadow_stack_pos)  // May happen during bootstrap.
    b->alloc_stack_id = StackDepotPut(thr->shadow_stack,
                                      thr->shadow_stack_pos - thr->shadow_stack);
  if (CTX() && CTX()->initialized) {
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  }
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}

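// Free path used by the free/delete interceptors: destroy any synchronization
// objects (SyncVar) hanging off the block, mark the range as freed in shadow
// memory and return the chunk to the thread-local cache.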
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->head) {
    Lock l(&b->mtx);
    for (SyncVar *s = b->head; s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->head = 0;
  }
  if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
    MemoryRangeFreed(thr, pc, (uptr)p, b->size);
  }
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

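// Naive realloc on top of user_alloc/user_free: allocate a new block, copy
// min(old size, new size) bytes, free the old block. Note that
// user_realloc(thr, pc, p, 0) frees p and returns 0.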
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      internal_memcpy(p2, p, min(b->size, sz));
    }
  }
  if (p) {
    user_free(thr, pc, p);
  }
  return p2;
}

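// Returns the metadata block the allocator keeps for a user pointer.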
MBlock *user_mblock(ThreadState *thr, void *p) {
  // CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  return (MBlock*)allocator()->GetMetaData(p);
}

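// Allocator for the runtime's own data structures. thr->nomalloc must be
// reset before CHECK(0) fires, because CHECK itself allocates while building
// the failure report and would otherwise recurse into this guard.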
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz);
}

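// Counterpart of internal_alloc(); the same nomalloc guard applies.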
void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p);
}

}  // namespace __tsan