//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

namespace __tsan {
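
// Static storage for the global allocator: the object is constructed
// in-place by InitializeAllocator(), so no heap is needed to create the
// heap. ALIGNED(64) presumably keeps the state cache-line-aligned to avoid
// false sharing with neighboring globals.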
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}
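
// Constructs the global allocator state; must run before the first
// user_alloc() call.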
void InitializeAllocator() {
  allocator()->Init();
}
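
// Called on thread exit: returns the thread's local allocator cache to the
// global allocator so the cached memory can be reused by other threads.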
void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->SwallowCache(&thr->alloc_cache);
}
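
// malloc/free are not async-signal-safe. If the current thread is inside a
// signal handler (and report_signal_unsafe is enabled), report the call
// together with the current stack trace.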
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(&stack);
  OutputReport(rep, rep.GetReport()->stacks[0]);
}
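
// Backend for the malloc-family interceptors. Fills in block metadata
// (size, allocating thread, allocation stack) and imitates a write to the
// whole range so that unsynchronized accesses racing with the allocation
// are detected. A rough sketch of a caller (the real interceptors live
// elsewhere in the runtime; the macro name here is illustrative only):
//   void *malloc(uptr size) {
//     SCOPED_INTERCEPTOR_RAW(malloc, size);
//     return user_alloc(thr, pc, size);
//   }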
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  CHECK_GT(thr->in_rtl, 0);
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  b->size = sz;
  b->alloc_tid = thr->unique_id;
  b->alloc_stack_id = CurrentStackId(thr, pc);
  if (CTX() && CTX()->initialized) {
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  }
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}
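
// Backend for the free interceptor. Destroys any sync objects (e.g. mutex
// state) that user code created inside the block, then marks the range as
// freed so that later accesses are reported as races with the free.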
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->head) {
    Lock l(&b->mtx);
    for (SyncVar *s = b->head; s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->head = 0;
  }
  if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
    MemoryRangeFreed(thr, pc, (uptr)p, b->size);
  }
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}
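
// realloc is implemented as allocate-copy-free. Note the standard corner
// cases: realloc(0, sz) acts as malloc, and realloc(p, 0) frees p and
// returns 0.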
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      internal_memcpy(p2, p, min(b->size, sz));
    }
  }
  if (p) {
    user_free(thr, pc, p);
  }
  return p2;
}
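
// Returns the allocator metadata block for a pointer previously returned
// by user_alloc().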
MBlock *user_mblock(ThreadState *thr, void *p) {
  // CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  return (MBlock*)allocator()->GetMetaData(p);
}
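
// internal_alloc/internal_free manage the runtime's own data structures.
// thr->nomalloc marks regions where the runtime must not allocate; hitting
// an allocation there is a bug in the runtime itself.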
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK itself allocates, so clear the flag first.
    CHECK(0);
  }
  return InternalAlloc(sz);
}
void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK itself allocates, so clear the flag first.
    CHECK(0);
  }
  InternalFree(p);
}

}  // namespace __tsan