//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

namespace __tsan {

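// The allocator state lives in a statically allocated buffer (defined
// elsewhere) so that it is usable before any dynamic initializers run;
// allocator() simply reinterprets that storage.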
extern char allocator_placeholder[];
INLINE Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}

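// Called on thread exit: returns the thread-local allocator cache to the
// central allocator so the cached memory can be reused by other threads.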
void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->SwallowCache(&thr->alloc_cache);
}

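// malloc/free are not async-signal-safe, so calling them from a signal
// handler is a bug: report it if report_signal_unsafe is set.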
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(&stack);
  OutputReport(rep, rep.GetReport()->stacks[0]);
}

Dmitry Vyukov954fc8c2012-08-15 15:35:15 +000045void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000046 CHECK_GT(thr->in_rtl, 0);
Dmitry Vyukov954fc8c2012-08-15 15:35:15 +000047 void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
48 if (p == 0)
Dmitry Vyukovf64046c2012-05-18 09:41:52 +000049 return 0;
Dmitry Vyukov954fc8c2012-08-15 15:35:15 +000050 MBlock *b = (MBlock*)allocator()->GetMetaData(p);
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000051 b->size = sz;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000052 if (CTX() && CTX()->initialized) {
Dmitry Vyukov1c0b3c62012-08-15 17:27:20 +000053 MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000054 }
Alexey Samsonov51ae9832012-06-06 13:11:29 +000055 DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000056 SignalUnsafeCall(thr, pc);
57 return p;
58}
59
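// Frees application memory: destroys any synchronization objects
// attached to the block, marks the range as freed, and returns the
// chunk to the allocator.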
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
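  // Destroy sync objects (e.g. mutexes) that live in the block being
  // freed; the lock/unlock pair serializes with any thread that still
  // holds the SyncVar's mutex before the SyncVar is destroyed.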
  if (b->head) {
    Lock l(&b->mtx);
    for (SyncVar *s = b->head; s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->head = 0;
  }
  if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
    MemoryRangeFreed(thr, pc, (uptr)p, b->size);
  }
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

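// Realloc is implemented as allocate-new / copy min(old, new) bytes /
// free-old; realloc(p, 0) frees p and returns 0.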
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      internal_memcpy(p2, p, min(b->size, sz));
    }
  }
  if (p) {
    user_free(thr, pc, p);
  }
  return p2;
}

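// Returns the MBlock (allocator metadata) describing the user block p.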
MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  return (MBlock*)allocator()->GetMetaData(p);
}

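// internal_alloc/internal_free manage memory for the runtime's own data
// structures. thr->nomalloc is reset before CHECK(0) fires because CHECK
// itself allocates internally.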
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p);
}

}  // namespace __tsan