//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

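// A single interned stack trace. Nodes in one hash-table bucket are chained
// through |link|; the trace's PCs are stored inline in the trailing |stack|
// array. StackDepotBase (sanitizer_stackdepotbase.h) supplies the table
// itself and expects this node type to provide eq(), store(), load(), hash(),
// is_valid(), storage_size() and the args_type/handle_type typedefs.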
struct StackDepotNode {
  StackDepotNode *link;
  u32 id;
  atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
  u32 size;
  u32 tag;
  uptr stack[1];  // [size]

  static const u32 kTabSizeLog = 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;
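  // Concretely, with kUseCountBits == 20:
  //   kUseCountMask == 0x000fffff  (bits  0..19: use counter)
  //   kHashMask     == 0xfff00000  (bits 20..31: top 12 hash bits)
  // e.g. the word 0xabc00005 encodes hash bits 0xabc and a use count of 5.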

  typedef StackTrace args_type;
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
      return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
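  // Node and trace share one allocation; sizeof(StackDepotNode) already
  // counts one trailing uptr, hence the "- 1".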
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
  static u32 hash(const args_type &args) {
    // murmur2
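    // (MurmurHash2 by Austin Appleby: the state is seeded with the byte
    // length of the trace, then each PC, truncated to 32 bits, is mixed in.)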
    const u32 m = 0x5bd1e995;
    const u32 seed = 0x9747b28c;
    const u32 r = 24;
    u32 h = seed ^ (args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) {
      u32 k = args.trace[i];
      k *= m;
      k ^= k >> r;
      k *= m;
      h *= m;
      h ^= k;
    }
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;
    return h;
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    tag = args.tag;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
  args_type load() const {
    return args_type(&stack[0], size, tag);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};

COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);

u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
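// The add below is atomic, but an overflow past kUseCountBits would carry
// into the hash bits, which is what the CHECK guards against.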
void StackDepotHandle::inc_use_count_unsafe() {
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}

u32 StackDepotPut(StackTrace stack) {
  StackDepotHandle h = theDepot.Put(stack);
  return h.valid() ? h.id() : 0;
}

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}

void StackDepotLockAll() {
  theDepot.LockAll();
}

void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}

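// StackDepotReverseMap maps stack ids back to traces via binary search rather
// than through the depot's hash table: the constructor snapshots every node
// into a vector sorted by id, and Get() looks the id up in that vector.
// Traces added to the depot after construction are not visible through it.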
bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}

StackDepotReverseMap::StackDepotReverseMap()
    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
    atomic_uintptr_t *p = &theDepot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
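    // Bit 0 of the bucket pointer is StackDepotBase's lock bit; mask it off.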
    StackDepotNode *s = (StackDepotNode*)(v & ~1);
    for (; s; s = s->link) {
      IdDescPair pair = {s->id, s};
      map_.push_back(pair);
    }
  }
  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
}

StackTrace StackDepotReverseMap::Get(u32 id) {
  if (!map_.size())
    return StackTrace();
  IdDescPair pair = {id, 0};
  uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
                                  IdDescPair::IdComparator);
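  // InternalBinarySearch reports "not found" with an index past the end.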
  if (idx > map_.size())
    return StackTrace();
  return map_[idx].desc->load();
}

}  // namespace __sanitizer