//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"
#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"

namespace __sanitizer {

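// Id layout: a stack id packs two fields into a u32. The high kPartBits
// select one of kPartCount "parts" of the hash table; the low kPartShift bits
// hold a per-part sequence number, so ids stay unique without a single global
// counter. kMaxId bounds the per-part sequence.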
const int kTabSize = 1024 * 1024;  // Hash table size.
const int kPartBits = 10;
const int kPartShift = sizeof(u32) * 8 - kPartBits;
const int kPartCount = 1 << kPartBits;  // Number of subparts in the table.
const int kPartSize = kTabSize / kPartCount;
const int kMaxId = 1 << kPartShift;

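// Variable-size descriptor of one stack trace. Descriptors that hash into the
// same table bucket form a singly-linked list through 'link'; 'stack' really
// holds 'size' frames (the descriptor is over-allocated in allocDesc).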
struct StackDesc {
  StackDesc *link;
  u32 id;
  u32 hash;
  uptr size;
  uptr stack[1];  // [size]
};

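// All depot state lives in this zero-initialized global. Because region_pos
// starts out as 0, the very first allocation falls through to allocDesc's
// locked slow path, which maps the initial superblock.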
static struct {
  StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.
  atomic_uintptr_t region_pos;  // Region allocator for StackDesc's.
  atomic_uintptr_t region_end;
  atomic_uintptr_t tab[kTabSize];  // Hash table of StackDesc's.
  atomic_uint32_t seq[kPartCount];  // Unique id generators.
} depot;

static u32 hash(const uptr *stack, uptr size) {
  // MurmurHash2 of the raw frame addresses.
  const u32 m = 0x5bd1e995;
  const u32 seed = 0x9747b28c;
  const u32 r = 24;
  u32 h = seed ^ (size * sizeof(uptr));
  for (uptr i = 0; i < size; i++) {
    u32 k = stack[i];
    k *= m;
    k ^= k >> r;
    k *= m;
    h *= m;
    h ^= k;
  }
  h ^= h >> 13;
  h *= m;
  h ^= h >> 15;
  return h;
}

static StackDesc *tryallocDesc(uptr memsz) {
  // Optimistic lock-free allocation: essentially, try to bump the region ptr.
  for (;;) {
    uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire);
    uptr end = atomic_load(&depot.region_end, memory_order_acquire);
    if (cmp == 0 || cmp + memsz > end)
      return 0;
    if (atomic_compare_exchange_weak(
        &depot.region_pos, &cmp, cmp + memsz,
        memory_order_acquire))
      return (StackDesc*)cmp;
  }
}

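// Allocates a StackDesc large enough to hold 'size' frames. The fast path is
// the lock-free bump allocation above; if the current region is exhausted we
// take depot.mtx, reset region_pos to 0 (so concurrent lock-free attempts
// fail harmlessly), map a fresh superblock of at least 64K with MmapOrDie,
// publish it, and retry. The unused tail of the previous superblock is
// abandoned.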
static StackDesc *allocDesc(uptr size) {
  // First, try to allocate optimistically.
  uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
  StackDesc *s = tryallocDesc(memsz);
  if (s)
    return s;
  // If that failed, take the lock, retry, and allocate a new superblock.
  SpinMutexLock l(&depot.mtx);
  for (;;) {
    s = tryallocDesc(memsz);
    if (s)
      return s;
    atomic_store(&depot.region_pos, 0, memory_order_relaxed);
    uptr allocsz = 64 * 1024;
    if (allocsz < memsz)
      allocsz = memsz;
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
    atomic_store(&depot.region_pos, mem, memory_order_release);
  }
}

static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) {
  // Searches linked list s for the stack, returns its id.
  for (; s; s = s->link) {
    if (s->hash == hash && s->size == size) {
      uptr i = 0;
      for (; i < size; i++) {
        if (stack[i] != s->stack[i])
          break;
      }
      if (i == size)
        return s->id;
    }
  }
  return 0;
}

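// Locks a hash table bucket by setting the low bit of the bucket pointer
// (descriptor pointers have that bit clear, as unlock() DCHECKs). Spins with
// proc_yield for the first 10 iterations, then yields to the OS scheduler to
// avoid burning CPU under contention.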
static StackDesc *lock(atomic_uintptr_t *p) {
  // Uses the pointer lsb as mutex.
  for (int i = 0;; i++) {
    uptr cmp = atomic_load(p, memory_order_relaxed);
    if ((cmp & 1) == 0
        && atomic_compare_exchange_weak(p, &cmp, cmp | 1,
                                        memory_order_acquire))
      return (StackDesc*)cmp;
    if (i < 10)
      proc_yield(10);
    else
      internal_sched_yield();
  }
}

static void unlock(atomic_uintptr_t *p, StackDesc *s) {
  DCHECK_EQ((uptr)s & 1, 0);
  atomic_store(p, (uptr)s, memory_order_release);
}

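// Maps a stack trace to a unique non-zero u32 id. The common case is a
// lock-free lookup: the bucket's list is scanned without taking the bucket
// lock. Only if the stack is not found do we lock the bucket, re-scan the
// (possibly updated) list, and otherwise allocate a new descriptor, assign it
// the next id from this part's sequence counter, and prepend it to the list.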
u32 StackDepotPut(const uptr *stack, uptr size) {
  if (stack == 0 || size == 0)
    return 0;
  uptr h = hash(stack, size);
  atomic_uintptr_t *p = &depot.tab[h % kTabSize];
  uptr v = atomic_load(p, memory_order_consume);
  StackDesc *s = (StackDesc*)(v & ~1);
  // First, try to find the existing stack.
  u32 id = find(s, stack, size, h);
  if (id)
    return id;
  // If not found, lock the bucket, re-check, and insert a new descriptor.
  StackDesc *s2 = lock(p);
  if (s2 != s) {
    id = find(s2, stack, size, h);
    if (id) {
      unlock(p, s2);
      return id;
    }
  }
  uptr part = (h % kTabSize) / kPartSize;
  id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
  CHECK_LT(id, kMaxId);
  id |= part << kPartShift;
  s = allocDesc(size);
  s->id = id;
  s->hash = h;
  s->size = size;
  internal_memcpy(s->stack, stack, size * sizeof(uptr));
  s->link = s2;
  unlock(p, s);
  return id;
}

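// Inverse of StackDepotPut: returns the stored frames for 'id' (their count
// via *size), or 0 if the id is unknown. An id only encodes its part, not the
// exact bucket, so all kPartSize buckets of that part are scanned until a
// descriptor with a matching id is found.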
const uptr *StackDepotGet(u32 id, uptr *size) {
  if (id == 0)
    return 0;
  // The high kPartBits of the id contain the part index, so we need to scan
  // at most kPartSize lists.
  uptr part = id >> kPartShift;
  for (int i = 0; i != kPartSize; i++) {
    uptr idx = part * kPartSize + i;
    CHECK_LT(idx, kTabSize);
    atomic_uintptr_t *p = &depot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
    StackDesc *s = (StackDesc*)(v & ~1);
    for (; s; s = s->link) {
      if (s->id == id) {
        *size = s->size;
        return s->stack;
      }
    }
  }
  *size = 0;
  return 0;
}

}  // namespace __sanitizer