//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H

#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"

namespace __sanitizer {

// Concurrent uptr->T hashmap.
// T must be a POD type, kSize is preferably a prime but can be any number.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
//   Map::Handle h(&m, addr);
//   use h.operator->() to access the data
//   if h.created() then the element was just created, and the current thread
//     has exclusive access to it
//   otherwise the current thread has only read access to the data
// }
// {
//   Map::Handle h(&m, addr, true);
//   this will remove the data from the map in Handle dtor
//   the current thread has exclusive access to the data
//   if !h.exists() then the element never existed
// }
template<typename T, uptr kSize>
class AddrHashMap {
 private:
  struct Cell {
    atomic_uintptr_t addr;
    T val;
  };

  struct AddBucket {
    uptr cap;
    uptr size;
    Cell cells[1];  // variable len
  };

  static const uptr kBucketSize = 3;

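  // A bucket holds kBucketSize cells inline ("embed" cells); once they are
  // full, additional cells spill into a heap-allocated AddBucket reachable
  // through |add|. |mtx| guards the add array and all bucket modifications;
  // the embed cells can additionally be read lock-free.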
  struct Bucket {
    RWMutex mtx;
    atomic_uintptr_t add;
    Cell cells[kBucketSize];
  };

 public:
  AddrHashMap();

  class Handle {
   public:
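    // The default is lookup-or-create (create = true, remove = false);
    // the extra parameters of the overloads make remove/create explicit.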
    Handle(AddrHashMap<T, kSize> *map, uptr addr);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);

    ~Handle();
    T *operator->();
    bool created() const;
    bool exists() const;

   private:
    friend AddrHashMap<T, kSize>;
    AddrHashMap<T, kSize> *map_;
    Bucket *bucket_;
    Cell *cell_;
    uptr addr_;
    uptr addidx_;
    bool created_;
    bool remove_;
    bool create_;
  };

 private:
  friend class Handle;
  Bucket *table_;

  void acquire(Handle *h);
  void release(Handle *h);
  uptr calcHash(uptr addr);
};

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
  map_ = map;
  addr_ = addr;
  remove_ = false;
  create_ = true;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = true;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove, bool create) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = create;
  map_->acquire(this);
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
  map_->release(this);
}

template <typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator->() {
  return &cell_->val;
}

template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
  return created_;
}

template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
  return cell_ != nullptr;
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::AddrHashMap() {
  table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}

template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) {
  uptr addr = h->addr_;
  uptr hash = calcHash(addr);
  Bucket *b = &table_[hash];

  h->created_ = false;
  h->addidx_ = -1U;
  h->bucket_ = b;
  h->cell_ = nullptr;

  // If we want to remove the element, we need exclusive access to the bucket,
  // so skip the lock-free phase.
  if (h->remove_)
    goto locked;

 retry:
  // First try to find an existing element w/o read mutex.
  CHECK(!h->remove_);
  // Check the embed cells.
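  // (An element's addr is published with a release store in release(), so the
  // acquire load below is sufficient for a lock-free hit to observe a fully
  // initialized value.)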
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
    if (addr1 == addr) {
      h->cell_ = c;
      return;
    }
  }

  // Check the add cells with read lock.
  if (atomic_load(&b->add, memory_order_relaxed)) {
    b->mtx.ReadLock();
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
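        // Found in the add array: return with the read lock still held;
        // release() drops it in the Handle dtor (addidx_ != -1U marks this).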
        h->addidx_ = i;
        h->cell_ = c;
        return;
      }
    }
    b->mtx.ReadUnlock();
  }

 locked:
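  // The write lock taken here is dropped early only on the retry path or when
  // there is nothing to do; for create and remove it is held across the return
  // and released in release().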
  // Re-check existence under write lock.
  // Embed cells.
  b->mtx.Lock();
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == addr) {
      if (h->remove_) {
        h->cell_ = c;
        return;
      }
      b->mtx.Unlock();
      goto retry;
    }
  }

  // Add cells.
  AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
  if (add) {
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        if (h->remove_) {
          h->addidx_ = i;
          h->cell_ = c;
          return;
        }
        b->mtx.Unlock();
        goto retry;
      }
    }
  }

  // The element does not exist, no need to create it if we want to remove.
  if (h->remove_ || !h->create_) {
    b->mtx.Unlock();
    return;
  }

  // Now try to create it under the mutex.
  h->created_ = true;
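  // The chosen cell keeps addr == 0 for now, so lock-free readers cannot match
  // it yet; release() publishes h->addr_ with a release store.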
  // See if we have a free embed cell.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == 0) {
      h->cell_ = c;
      return;
    }
  }

  // Store in the add cells.
  if (!add) {
    // Allocate a new add array.
    const uptr kInitSize = 64;
    add = (AddBucket*)InternalAlloc(kInitSize);
    internal_memset(add, 0, kInitSize);
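    // cells[1] is already counted in sizeof(*add), hence the +1 below: the
    // capacity is that one cell plus whatever fits in the remaining bytes.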
    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add->size = 0;
    atomic_store(&b->add, (uptr)add, memory_order_relaxed);
  }
  if (add->size == add->cap) {
    // Grow existing add array.
    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
    uptr newsize = oldsize * 2;
    AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
    internal_memset(add1, 0, newsize);
    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add1->size = add->size;
    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
    InternalFree(add);
    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
    add = add1;
  }
  // Store.
  uptr i = add->size++;
  Cell *c = &add->cells[i];
  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
  h->addidx_ = i;
  h->cell_ = c;
}

template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) {
  if (!h->cell_)
    return;
  Bucket *b = h->bucket_;
  Cell *c = h->cell_;
  uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
  if (h->created_) {
    // Denote completion of insertion.
    CHECK_EQ(addr1, 0);
    // After the following store, the element becomes available
    // for lock-free reads.
    atomic_store(&c->addr, h->addr_, memory_order_release);
    b->mtx.Unlock();
  } else if (h->remove_) {
    // Denote that the cell is empty now.
    CHECK_EQ(addr1, h->addr_);
    atomic_store(&c->addr, 0, memory_order_release);
    // See if we need to compact the bucket.
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    if (h->addidx_ == -1U) {
      // Removed from embed array, move an add element into the freed cell.
      if (add && add->size != 0) {
        uptr last = --add->size;
        Cell *c1 = &add->cells[last];
        c->val = c1->val;
        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
        atomic_store(&c->addr, addr1, memory_order_release);
        atomic_store(&c1->addr, 0, memory_order_release);
      }
    } else {
      // Removed from add array, compact it.
      uptr last = --add->size;
      Cell *c1 = &add->cells[last];
      if (c != c1) {
        *c = *c1;
        atomic_store(&c1->addr, 0, memory_order_relaxed);
      }
    }
    if (add && add->size == 0) {
      // FIXME(dvyukov): free add?
    }
    b->mtx.Unlock();
  } else {
    CHECK_EQ(addr1, h->addr_);
    if (h->addidx_ != -1U)
      b->mtx.ReadUnlock();
  }
}

template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
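  // Cheap shift-and-xor mixing of the address bits before reducing mod kSize.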
  addr += addr << 10;
  addr ^= addr >> 6;
  return addr % kSize;
}

}  // namespace __sanitizer

#endif  // SANITIZER_ADDRHASHMAP_H