blob: b6027303674df16602fb2cfadede8b02c3f95a75 [file] [log] [blame]
Jason Evans0657f122011-03-18 17:56:14 -07001#define JEMALLOC_RTREE_C_
Jason Evans2dbecf12010-09-05 10:35:13 -07002#include "jemalloc/internal/jemalloc_internal.h"
3
/* Return the smaller of two unsigned height values. */
static unsigned
hmin(unsigned ha, unsigned hb)
{

	if (hb < ha)
		return (hb);
	return (ha);
}
10
/*
 * Only the most significant bits of keys passed to rtree_{read,write}() are
 * used.
 */
Jason Evans8d0e04d2015-01-30 22:54:08 -080015bool
Jason Evans8c9be3e2016-04-16 00:36:11 -070016rtree_new(rtree_t *rtree, unsigned bits)
Jason Evans8d0e04d2015-01-30 22:54:08 -080017{
18 unsigned bits_in_leaf, height, i;
Jason Evans2dbecf12010-09-05 10:35:13 -070019
Jason Evans6c460ad2016-03-22 17:54:35 -070020 assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
21 RTREE_BITS_PER_LEVEL));
Jason Evansb980cc72014-01-02 16:08:28 -080022 assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
23
Jason Evans8d0e04d2015-01-30 22:54:08 -080024 bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
25 : (bits % RTREE_BITS_PER_LEVEL);
Jason Evansb954bc52014-01-02 17:36:38 -080026 if (bits > bits_in_leaf) {
Jason Evans8d0e04d2015-01-30 22:54:08 -080027 height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
28 if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
Jason Evansb954bc52014-01-02 17:36:38 -080029 height++;
Jason Evansb954bc52014-01-02 17:36:38 -080030 } else
Jason Evans8d0e04d2015-01-30 22:54:08 -080031 height = 1;
32 assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
Jason Evans2dbecf12010-09-05 10:35:13 -070033
Jason Evans8d0e04d2015-01-30 22:54:08 -080034 rtree->height = height;
35
36 /* Root level. */
37 rtree->levels[0].subtree = NULL;
38 rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
39 bits_in_leaf;
40 rtree->levels[0].cumbits = rtree->levels[0].bits;
41 /* Interior levels. */
42 for (i = 1; i < height-1; i++) {
43 rtree->levels[i].subtree = NULL;
44 rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
45 rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
46 RTREE_BITS_PER_LEVEL;
Jason Evans2dbecf12010-09-05 10:35:13 -070047 }
Jason Evans8d0e04d2015-01-30 22:54:08 -080048 /* Leaf level. */
49 if (height > 1) {
50 rtree->levels[height-1].subtree = NULL;
51 rtree->levels[height-1].bits = bits_in_leaf;
52 rtree->levels[height-1].cumbits = bits;
53 }
Jason Evans2dbecf12010-09-05 10:35:13 -070054
Jason Evans8d0e04d2015-01-30 22:54:08 -080055 /* Compute lookup table to be used by rtree_start_level(). */
56 for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
57 rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
58 1);
59 }
60
61 return (false);
Jason Evans2dbecf12010-09-05 10:35:13 -070062}
Jason Evans20f1fc92012-10-09 14:46:22 -070063
/*
 * Node allocation hook.  Under JEMALLOC_JET the implementation is renamed
 * via JEMALLOC_N() and re-exposed below as a mutable function pointer so
 * tests can interpose their own allocator.
 */
#ifdef JEMALLOC_JET
#undef rtree_node_alloc
#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc_impl)
#endif
/* Allocate backing storage for nelms rtree elements via base_alloc(). */
static rtree_elm_t *
rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms)
{

	/*
	 * NOTE(review): nelms * sizeof(rtree_elm_t) is not overflow-checked;
	 * callers pass ZU(1) << level-bits, presumed small enough — confirm.
	 */
	return ((rtree_elm_t *)base_alloc(tsdn, nelms * sizeof(rtree_elm_t)));
}
#ifdef JEMALLOC_JET
#undef rtree_node_alloc
#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc)
rtree_node_alloc_t *rtree_node_alloc = JEMALLOC_N(rtree_node_alloc_impl);
#endif
79
80#ifdef JEMALLOC_JET
81#undef rtree_node_dalloc
82#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc_impl)
83#endif
84UNUSED static void
85rtree_node_dalloc(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node)
86{
87
88 /* Nodes are never deleted during normal operation. */
89 not_reached();
90}
91#ifdef JEMALLOC_JET
92#undef rtree_node_dalloc
93#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc)
94rtree_node_dalloc_t *rtree_node_dalloc = JEMALLOC_N(rtree_node_dalloc_impl);
95#endif
96
97#ifdef JEMALLOC_JET
Jason Evansb980cc72014-01-02 16:08:28 -080098static void
Jason Evans8c9be3e2016-04-16 00:36:11 -070099rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node,
100 unsigned level)
Jason Evansb980cc72014-01-02 16:08:28 -0800101{
102
Jason Evansfbd8d772015-03-11 23:14:50 -0700103 if (level + 1 < rtree->height) {
Jason Evansb980cc72014-01-02 16:08:28 -0800104 size_t nchildren, i;
105
Jason Evans8d0e04d2015-01-30 22:54:08 -0800106 nchildren = ZU(1) << rtree->levels[level].bits;
Jason Evansb980cc72014-01-02 16:08:28 -0800107 for (i = 0; i < nchildren; i++) {
Jason Evans2d2b4e92016-03-28 03:06:35 -0700108 rtree_elm_t *child = node[i].child;
Jason Evans8c9be3e2016-04-16 00:36:11 -0700109 if (child != NULL) {
110 rtree_delete_subtree(tsdn, rtree, child, level +
111 1);
112 }
Jason Evansb980cc72014-01-02 16:08:28 -0800113 }
114 }
Jason Evans8c9be3e2016-04-16 00:36:11 -0700115 rtree_node_dalloc(tsdn, rtree, node);
Jason Evansb980cc72014-01-02 16:08:28 -0800116}
117
118void
Jason Evans8c9be3e2016-04-16 00:36:11 -0700119rtree_delete(tsdn_t *tsdn, rtree_t *rtree)
Jason Evansb980cc72014-01-02 16:08:28 -0800120{
Jason Evans8d0e04d2015-01-30 22:54:08 -0800121 unsigned i;
Jason Evansb980cc72014-01-02 16:08:28 -0800122
Jason Evans8d0e04d2015-01-30 22:54:08 -0800123 for (i = 0; i < rtree->height; i++) {
Jason Evans2d2b4e92016-03-28 03:06:35 -0700124 rtree_elm_t *subtree = rtree->levels[i].subtree;
Jason Evans8d0e04d2015-01-30 22:54:08 -0800125 if (subtree != NULL)
Jason Evans8c9be3e2016-04-16 00:36:11 -0700126 rtree_delete_subtree(tsdn, rtree, subtree, i);
Jason Evans8d0e04d2015-01-30 22:54:08 -0800127 }
Jason Evansb980cc72014-01-02 16:08:28 -0800128}
Jason Evans8c9be3e2016-04-16 00:36:11 -0700129#endif
Jason Evansb980cc72014-01-02 16:08:28 -0800130
Jason Evans2d2b4e92016-03-28 03:06:35 -0700131static rtree_elm_t *
Jason Evans8c9be3e2016-04-16 00:36:11 -0700132rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
133 rtree_elm_t **elmp)
Jason Evans20f1fc92012-10-09 14:46:22 -0700134{
Jason Evans2d2b4e92016-03-28 03:06:35 -0700135 rtree_elm_t *node;
Jason Evans20f1fc92012-10-09 14:46:22 -0700136
Jason Evans8d0e04d2015-01-30 22:54:08 -0800137 if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
138 /*
139 * Another thread is already in the process of initializing.
140 * Spin-wait until initialization is complete.
141 */
142 do {
143 CPU_SPINWAIT;
144 node = atomic_read_p((void **)elmp);
145 } while (node == RTREE_NODE_INITIALIZING);
146 } else {
Jason Evans8c9be3e2016-04-16 00:36:11 -0700147 node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
148 rtree->levels[level].bits);
Jason Evans8d0e04d2015-01-30 22:54:08 -0800149 if (node == NULL)
150 return (NULL);
151 atomic_write_p((void **)elmp, node);
152 }
153
154 return (node);
Jason Evans20f1fc92012-10-09 14:46:22 -0700155}
156
Jason Evans2d2b4e92016-03-28 03:06:35 -0700157rtree_elm_t *
Jason Evans8c9be3e2016-04-16 00:36:11 -0700158rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level)
Jason Evans20f1fc92012-10-09 14:46:22 -0700159{
160
Jason Evans8c9be3e2016-04-16 00:36:11 -0700161 return (rtree_node_init(tsdn, rtree, level,
162 &rtree->levels[level].subtree));
Jason Evans20f1fc92012-10-09 14:46:22 -0700163}
164
Jason Evans2d2b4e92016-03-28 03:06:35 -0700165rtree_elm_t *
Jason Evans8c9be3e2016-04-16 00:36:11 -0700166rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm,
167 unsigned level)
Jason Evans20f1fc92012-10-09 14:46:22 -0700168{
169
Jason Evans8c9be3e2016-04-16 00:36:11 -0700170 return (rtree_node_init(tsdn, rtree, level, &elm->child));
Jason Evans20f1fc92012-10-09 14:46:22 -0700171}
Jason Evanse75e9be2016-04-17 12:55:10 -0700172
173static int
174rtree_elm_witness_comp(const witness_t *a, void *oa, const witness_t *b,
175 void *ob)
176{
177 uintptr_t ka = (uintptr_t)oa;
178 uintptr_t kb = (uintptr_t)ob;
179
180 assert(ka != 0);
181 assert(kb != 0);
182
183 return ((ka > kb) - (ka < kb));
184}
185
186static witness_t *
187rtree_elm_witness_alloc(tsd_t *tsd, uintptr_t key, const rtree_elm_t *elm)
188{
189 witness_t *witness;
190 size_t i;
191 rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd);
192
193 /* Iterate over entire array to detect double allocation attempts. */
194 witness = NULL;
195 for (i = 0; i < sizeof(rtree_elm_witness_tsd_t) / sizeof(witness_t);
196 i++) {
197 rtree_elm_witness_t *rew = &witnesses->witnesses[i];
198
199 assert(rew->elm != elm);
200 if (rew->elm == NULL && witness == NULL) {
201 rew->elm = elm;
202 witness = &rew->witness;
203 witness_init(witness, "rtree_elm",
204 WITNESS_RANK_RTREE_ELM, rtree_elm_witness_comp,
205 (void *)key);
206 }
207 }
208 assert(witness != NULL);
209 return (witness);
210}
211
212static witness_t *
213rtree_elm_witness_find(tsd_t *tsd, const rtree_elm_t *elm)
214{
215 size_t i;
216 rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd);
217
218 for (i = 0; i < sizeof(rtree_elm_witness_tsd_t) / sizeof(witness_t);
219 i++) {
220 rtree_elm_witness_t *rew = &witnesses->witnesses[i];
221
222 if (rew->elm == elm)
223 return (&rew->witness);
224 }
225 not_reached();
226}
227
228static void
229rtree_elm_witness_dalloc(tsd_t *tsd, witness_t *witness, const rtree_elm_t *elm)
230{
231 size_t i;
232 rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd);
233
234 for (i = 0; i < sizeof(rtree_elm_witness_tsd_t) / sizeof(witness_t);
235 i++) {
236 rtree_elm_witness_t *rew = &witnesses->witnesses[i];
237
238 if (rew->elm == elm) {
239 rew->elm = NULL;
240 witness_init(&rew->witness, "rtree_elm",
241 WITNESS_RANK_RTREE_ELM, rtree_elm_witness_comp,
242 NULL);
243 return;
244 }
245 }
246 not_reached();
247}
248
249void
250rtree_elm_witness_acquire(tsdn_t *tsdn, const rtree_t *rtree, uintptr_t key,
251 const rtree_elm_t *elm)
252{
253 witness_t *witness;
254
255 if (tsdn_null(tsdn))
256 return;
257
258 witness = rtree_elm_witness_alloc(tsdn_tsd(tsdn), key, elm);
259 witness_lock(tsdn, witness);
260}
261
262void
263rtree_elm_witness_access(tsdn_t *tsdn, const rtree_t *rtree,
264 const rtree_elm_t *elm)
265{
266 witness_t *witness;
267
268 if (tsdn_null(tsdn))
269 return;
270
271 witness = rtree_elm_witness_find(tsdn_tsd(tsdn), elm);
272 witness_assert_owner(tsdn, witness);
273}
274
275void
276rtree_elm_witness_release(tsdn_t *tsdn, const rtree_t *rtree,
277 const rtree_elm_t *elm)
278{
279 witness_t *witness;
280
281 if (tsdn_null(tsdn))
282 return;
283
284 witness = rtree_elm_witness_find(tsdn_tsd(tsdn), elm);
285 witness_unlock(tsdn, witness);
286 rtree_elm_witness_dalloc(tsdn_tsd(tsdn), witness, elm);
287}