/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/preempt.h>		/* in_interrupt() */


/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
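
/*
 * Illustrative arithmetic, assuming the common configuration of
 * RADIX_TREE_MAP_SHIFT == 6 on a 64-bit machine: RADIX_TREE_MAX_PATH is
 * DIV_ROUND_UP(64, 6) == 11 levels, so RADIX_TREE_PRELOAD_SIZE works out
 * to 2 * 11 - 1 == 21 preallocated nodes per CPU.
 */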

/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->private_data points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline void *ptr_to_indirect(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
}

#define RADIX_TREE_RETRY	ptr_to_indirect(NULL)

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	void **ptr = node;
	return (parent->slots <= ptr) &&
			(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	return false;
}
#endif

static inline unsigned long get_slot_offset(struct radix_tree_node *parent,
						 void **slot)
{
	return slot - parent->slots;
}

static unsigned radix_tree_descend(struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned offset)
{
	void **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	if (radix_tree_is_indirect_ptr(entry)) {
		unsigned long siboff = get_slot_offset(parent, entry);
		if (siboff < RADIX_TREE_MAP_SIZE) {
			offset = siboff;
			entry = rcu_dereference_raw(parent->slots[offset]);
		}
	}
#endif

	*nodep = (void *)entry;
	return offset;
}

static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
{
	return root->gfp_mask & __GFP_BITS_MASK;
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
{
	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->gfp_mask &= __GFP_BITS_MASK;
}

static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
{
	return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
}

static inline unsigned root_tags_get(struct radix_tree_root *root)
{
	return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT;
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @addr: The address to base the search on
 * @size: The bitmap size in bits
 * @offset: The bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(const unsigned long *addr,
			 unsigned long size, unsigned long offset)
{
	if (!__builtin_constant_p(size))
		return find_next_bit(addr, size, offset);

	if (offset < size) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < size) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return size;
}

#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
	unsigned long i;

	pr_debug("radix node: %p offset %d tags %lx %lx %lx shift %d count %d parent %p\n",
		node, node->offset,
		node->tags[0][0], node->tags[1][0], node->tags[2][0],
		node->shift, node->count, node->parent);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		unsigned long first = index | (i << node->shift);
		unsigned long last = first | ((1UL << node->shift) - 1);
		void *entry = node->slots[i];
		if (!entry)
			continue;
		if (is_sibling_entry(node, entry)) {
			pr_debug("radix sblng %p offset %ld val %p indices %ld-%ld\n",
					entry, i,
					*(void **)indirect_to_ptr(entry),
					first, last);
		} else if (!radix_tree_is_indirect_ptr(entry)) {
			pr_debug("radix entry %p offset %ld indices %ld-%ld\n",
					entry, i, first, last);
		} else {
			dump_node(indirect_to_ptr(entry), first);
		}
	}
}

/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
	pr_debug("radix root: %p rnode %p tags %x\n",
			root, root->rnode,
			root->gfp_mask >> __GFP_BITS_SHIFT);
	if (!radix_tree_is_indirect_ptr(root->rnode))
		return;
	dump_node(indirect_to_ptr(root->rnode), 0);
}
#endif

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
	struct radix_tree_node *ret = NULL;
	gfp_t gfp_mask = root_gfp_mask(root);

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->private_data;
			ret->private_data = NULL;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep,
			       gfp_mask | __GFP_ACCOUNT);
out:
	BUG_ON(radix_tree_is_indirect_ptr(ret));
	return ret;
}

static void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);
	int i;

	/*
	 * must only free zeroed nodes into the slab. radix_tree_shrink
	 * can leave us with a non-NULL entry in the first slot, so clear
	 * that here to make sure.
	 */
	for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
		tag_clear(node, i, 0);

	node->slots[0] = NULL;
	node->count = 0;

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static int __radix_tree_preload(gfp_t gfp_mask)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
			node->private_data = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask);
}
EXPORT_SYMBOL(radix_tree_preload);
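
/*
 * Illustrative sketch (the lock, tree and item names below are hypothetical,
 * not taken from a real caller): the usual pattern is to preload outside the
 * lock, insert under it, then re-enable preemption with
 * radix_tree_preload_end():
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */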

/*
 * The same as the function above, except we don't guarantee preloading
 * happens.  We do it if we decide it helps.  On success, return zero with
 * preemption disabled.  On error, return -ENOMEM with preemption not
 * disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

static unsigned radix_tree_load_root(struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

	*nodep = node;

	if (likely(radix_tree_is_indirect_ptr(node))) {
		node = indirect_to_ptr(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root,
				unsigned long index, unsigned int shift)
{
	struct radix_tree_node *slot;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be. */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	slot = root->rnode;
	if (!slot)
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(root);

		if (!node)
			return -ENOMEM;

		/* Propagate the aggregated tag info into the new root */
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
			if (root_tag_get(root, tag))
				tag_set(node, tag, 0);
		}

		BUG_ON(shift > BITS_PER_LONG);
		node->shift = shift;
		node->offset = 0;
		node->count = 1;
		node->parent = NULL;
		if (radix_tree_is_indirect_ptr(slot)) {
			slot = indirect_to_ptr(slot);
			slot->parent = node;
			slot = ptr_to_indirect(slot);
		}
		node->slots[0] = slot;
		node = ptr_to_indirect(node);
		rcu_assign_pointer(root->rnode, node);
		shift += RADIX_TREE_MAP_SHIFT;
		slot = node;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}

/**
 * __radix_tree_create - create a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 * @order: index occupies 2^order aligned slots
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Create, if necessary, and return the node and slot for an item
 * at position @index in the radix tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->rnode is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 *
 * Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp)
{
	struct radix_tree_node *node = NULL, *slot;
	unsigned long maxindex;
	unsigned int shift, offset;
	unsigned long max = index | ((1UL << order) - 1);

	shift = radix_tree_load_root(root, &slot, &maxindex);

	/* Make sure the tree is high enough. */
	if (max > maxindex) {
		int error = radix_tree_extend(root, max, shift);
		if (error < 0)
			return error;
		shift = error;
		slot = root->rnode;
		if (order == shift)
			shift += RADIX_TREE_MAP_SHIFT;
	}

	offset = 0;			/* uninitialised var warning */
	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (slot == NULL) {
			/* Have to add a child node. */
			slot = radix_tree_node_alloc(root);
			if (!slot)
				return -ENOMEM;
			slot->shift = shift;
			slot->offset = offset;
			slot->parent = node;
			if (node) {
				rcu_assign_pointer(node->slots[offset],
							ptr_to_indirect(slot));
				node->count++;
			} else
				rcu_assign_pointer(root->rnode,
							ptr_to_indirect(slot));
		} else if (!radix_tree_is_indirect_ptr(slot))
			break;

		/* Go a level down */
		node = indirect_to_ptr(slot);
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		offset = radix_tree_descend(node, &slot, offset);
	}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	/* Insert pointers to the canonical entry */
	if (order > shift) {
		int i, n = 1 << (order - shift);
		offset = offset & ~(n - 1);
		slot = ptr_to_indirect(&node->slots[offset]);
		for (i = 0; i < n; i++) {
			if (node->slots[offset + i])
				return -EEXIST;
		}

		for (i = 1; i < n; i++) {
			rcu_assign_pointer(node->slots[offset + i], slot);
			node->count++;
		}
	}
#endif

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = node ? node->slots + offset : (void **)&root->rnode;
	return 0;
}

/**
 * __radix_tree_insert - insert into a radix tree
 * @root: radix tree root
 * @index: index key
 * @order: key covers the 2^order indices around index
 * @item: item to insert
 *
 * Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_indirect_ptr(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;
	if (*slot != NULL)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		node->count++;
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);

/**
 * __radix_tree_lookup - lookup an item in a radix tree
 * @root: radix tree root
 * @index: index key
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Lookup and return the item at position @index in the radix
 * tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->rnode is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;
	void **slot;

 restart:
	parent = NULL;
	slot = (void **)&root->rnode;
	shift = radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_indirect_ptr(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = indirect_to_ptr(node);
		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		offset = radix_tree_descend(parent, &node, offset);
		slot = parent->slots + offset;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}

/**
 * radix_tree_lookup_slot - lookup a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Returns: the slot corresponding to the position @index in the
 * radix tree @root. This is useful for update-if-exists operations.
 *
 * This function can be called under rcu_read_lock iff the slot is not
 * modified by radix_tree_replace_slot, otherwise it must be called
 * exclusive from other writers. Any dereference of the slot must be done
 * using radix_tree_deref_slot.
 */
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
	void **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
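
/*
 * Illustrative sketch (hypothetical tree and item names): the
 * update-if-exists pattern mentioned above, done while excluding other
 * writers so the slot cannot be freed underneath us:
 *
 *	void **slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(slot, new_item);
 */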

/**
 * radix_tree_lookup - perform lookup operation on a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Lookup the item at the position @index in the radix tree @root.
 *
 * This function can be called under rcu_read_lock, however the caller
 * must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 * them safely). No RCU barriers are required to access or modify the
 * returned item, however.
 */
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);

/**
 * radix_tree_tag_set - set a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree, from
 * the root all the way down to the leaf node.
 *
 * Returns the address of the tagged item.  Setting a tag on a not-present
 * item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;

	shift = radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_indirect_ptr(node)) {
		unsigned offset;

		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;

		parent = indirect_to_ptr(node);
		offset = radix_tree_descend(parent, &node, offset);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);

/**
 * radix_tree_tag_clear - clear a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree.  If this causes
 * the leaf node to have no tags set then clear the tag in the
 * next-to-leaf node, etc.
 *
 * Returns the address of the tagged item on success, else NULL.  ie:
 * has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;
	int uninitialized_var(offset);

	shift = radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_indirect_ptr(node)) {
		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;

		parent = indirect_to_ptr(node);
		offset = radix_tree_descend(parent, &node, offset);
	}

	if (node == NULL)
		goto out;

	index >>= shift;

	while (parent) {
		if (!tag_get(parent, tag, offset))
			goto out;
		tag_clear(parent, tag, offset);
		if (any_tag_set(parent, tag))
			goto out;

		index >>= RADIX_TREE_MAP_SHIFT;
		offset = index & RADIX_TREE_MAP_MASK;
		parent = parent->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);

out:
	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);

/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	unsigned int shift;

	if (!root_tag_get(root, tag))
		return 0;

	shift = radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;
	if (node == NULL)
		return 0;

	while (radix_tree_is_indirect_ptr(node)) {
		int offset;

		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;

		parent = indirect_to_ptr(node);
		offset = radix_tree_descend(parent, &node, offset);

		if (!node)
			return 0;
		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);
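
/*
 * Illustrative sketch (hypothetical tree and index; tag 0 is what the page
 * cache uses as PAGECACHE_TAG_DIRTY): how the three tag operations above
 * fit together:
 *
 *	radix_tree_tag_set(&my_tree, index, 0);
 *	WARN_ON(!radix_tree_tag_get(&my_tree, index, 0));
 *	radix_tree_tag_clear(&my_tree, index, 0);
 */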

static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root: radix tree root
 * @iter: iterator state
 * @flags: RADIX_TREE_ITER_* flags and tag index
 * Returns: pointer to chunk first slot, or NULL if iteration is over
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *rnode, *node;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL. iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and to forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

 restart:
	shift = radix_tree_load_root(root, &rnode, &maxindex);
	if (index > maxindex)
		return NULL;

	if (radix_tree_is_indirect_ptr(rnode)) {
		rnode = indirect_to_ptr(rnode);
	} else if (rnode) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		__set_iter_shift(iter, shift);
		return (void **)&root->rnode;
	} else
		return NULL;

	shift -= RADIX_TREE_MAP_SHIFT;
	offset = index >> shift;

	node = rnode;
	while (1) {
		struct radix_tree_node *slot;
		unsigned new_off = radix_tree_descend(node, &slot, offset);

		if (new_off < offset) {
			offset = new_off;
			index &= ~((RADIX_TREE_MAP_SIZE << shift) - 1);
			index |= offset << shift;
		}

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !slot) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(
						node->tags[tag],
						RADIX_TREE_MAP_SIZE,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = node->slots[offset];
					if (is_sibling_entry(node, slot))
						continue;
					if (slot)
						break;
				}
			index &= ~((RADIX_TREE_MAP_SIZE << shift) - 1);
			index += offset << shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			slot = rcu_dereference_raw(node->slots[offset]);
		}

		if ((slot == NULL) || (slot == RADIX_TREE_RETRY))
			goto restart;
		if (!radix_tree_is_indirect_ptr(slot))
			break;

		node = indirect_to_ptr(slot);
		shift -= RADIX_TREE_MAP_SHIFT;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
	}

	/* Update the iterator state */
	iter->index = index & ~((1 << shift) - 1);
	iter->next_index = (index | ((RADIX_TREE_MAP_SIZE << shift) - 1)) + 1;
	__set_iter_shift(iter, shift);

	/* Construct iter->tags bit-mask from node->tags[tag] array */
	if (flags & RADIX_TREE_ITER_TAGGED) {
		unsigned tag_long, tag_bit;

		tag_long = offset / BITS_PER_LONG;
		tag_bit = offset % BITS_PER_LONG;
		iter->tags = node->tags[tag][tag_long] >> tag_bit;
		/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
		if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
			/* Pick tags from next element */
			if (tag_bit)
				iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
			/* Clip chunk size, here only BITS_PER_LONG tags */
			iter->next_index = index + BITS_PER_LONG;
		}
	}

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);
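
/*
 * Illustrative sketch (hypothetical tree and do_something()): callers
 * normally drive radix_tree_next_chunk() through the iterator macros in
 * <linux/radix-tree.h> rather than calling it directly:
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
 *		do_something(radix_tree_deref_slot(slot), iter.index);
 *	rcu_read_unlock();
 *
 * A production caller running under RCU only would also handle
 * radix_tree_deref_retry() on the dereferenced entry.
 */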

/**
 * radix_tree_range_tag_if_tagged - for each item in given range set given
 *				   tag if item has another tag set
 * @root: radix tree root
 * @first_indexp: pointer to a starting index of a range to scan
 * @last_index: last index of a range to scan
 * @nr_to_tag: maximum number of items to tag
 * @iftag: tag index to test
 * @settag: tag index to set if tested tag is set
 *
 * This function scans the range of the radix tree from first_index to
 * last_index (inclusive).  For each item in the range, if iftag is set,
 * the function also sets settag.  The function stops either after tagging
 * nr_to_tag items or after reaching last_index.
 *
 * The tags must be set from the leaf level only and propagated back up the
 * path to the root. We must do this so that we resolve the full path before
 * setting any tags on intermediate nodes. If we set tags as we descend, then
 * we can get to the leaf node and find that the index that has the iftag
 * set is outside the range we are scanning. This results in dangling tags and
 * can lead to problems with later tag operations (e.g. livelocks on lookups).
 *
 * The function returns the number of leaves where the tag was set and sets
 * *first_indexp to the first unscanned index.
 * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
 * be prepared to handle that.
 */
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int iftag, unsigned int settag)
{
	struct radix_tree_node *slot, *node = NULL;
	unsigned long maxindex;
	unsigned int shift = radix_tree_load_root(root, &slot, &maxindex);
	unsigned long tagged = 0;
	unsigned long index = *first_indexp;

	last_index = min(last_index, maxindex);
	if (index > last_index)
		return 0;
	if (!nr_to_tag)
		return 0;
	if (!root_tag_get(root, iftag)) {
		*first_indexp = last_index + 1;
		return 0;
	}
	if (!radix_tree_is_indirect_ptr(slot)) {
		*first_indexp = last_index + 1;
		root_tag_set(root, settag);
		return 1;
	}

	node = indirect_to_ptr(slot);
	shift -= RADIX_TREE_MAP_SHIFT;

	for (;;) {
		unsigned long upindex;
		unsigned offset;

		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		offset = radix_tree_descend(node, &slot, offset);
		if (!slot)
			goto next;
		if (!tag_get(node, iftag, offset))
			goto next;
		/* Sibling slots never have tags set on them */
		if (radix_tree_is_indirect_ptr(slot)) {
			node = indirect_to_ptr(slot);
			shift -= RADIX_TREE_MAP_SHIFT;
			continue;
		}

		/* tag the leaf */
		tagged++;
		tag_set(node, settag, offset);

		slot = node->parent;
		/* walk back up the path tagging interior nodes */
		upindex = index >> shift;
		while (slot) {
			upindex >>= RADIX_TREE_MAP_SHIFT;
			offset = upindex & RADIX_TREE_MAP_MASK;

			/* stop if we find a node with the tag already set */
			if (tag_get(slot, settag, offset))
				break;
			tag_set(slot, settag, offset);
			slot = slot->parent;
		}

 next:
		/* Go to next item at level determined by 'shift' */
		index = ((index >> shift) + 1) << shift;
		/* Overflow can happen when last_index is ~0UL... */
		if (index > last_index || !index)
			break;
		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		while (offset == 0) {
			/*
			 * We've fully scanned this node. Go up. Because
			 * last_index is guaranteed to be in the tree, what
			 * we do below cannot wander astray.
			 */
			node = node->parent;
			shift += RADIX_TREE_MAP_SHIFT;
			offset = (index >> shift) & RADIX_TREE_MAP_MASK;
		}
		if (is_sibling_entry(node, node->slots[offset]))
			goto next;
		if (tagged >= nr_to_tag)
			break;
	}
	/*
	 * There is no need to set the root tag if no leaf within the range
	 * from *first_indexp to last_index was tagged with settag.
	 */
	if (tagged > 0)
		root_tag_set(root, settag);
	*first_indexp = index;

	return tagged;
}
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);

/**
 * radix_tree_gang_lookup - perform multiple lookup on a radix tree
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items.  Places
 * them at *@results and returns the number of items which were placed at
 * *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 * rcu_read_lock. In this case, rather than the returned results being
 * an atomic snapshot of the tree at a single point in time, the
 * semantics of an RCU protected gang lookup are as though multiple
 * radix_tree_lookups have been issued in individual locks, and results
 * stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_indirect_ptr(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
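
/*
 * Illustrative sketch (hypothetical batch size and do_something()):
 * gathering up to 16 present items starting from index 0:
 *
 *	void *batch[16];
 *	unsigned int i, n;
 *
 *	rcu_read_lock();
 *	n = radix_tree_gang_lookup(&my_tree, batch, 0, ARRAY_SIZE(batch));
 *	for (i = 0; i < n; i++)
 *		do_something(batch[i]);
 *	rcu_read_unlock();
 */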
1170
Nick Piggin47feff22008-07-25 19:45:29 -07001171/**
1172 * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
1173 * @root: radix tree root
1174 * @results: where the results of the lookup are placed
Hugh Dickins63286502011-08-03 16:21:18 -07001175 * @indices: where their indices should be placed (but usually NULL)
Nick Piggin47feff22008-07-25 19:45:29 -07001176 * @first_index: start the lookup from this key
1177 * @max_items: place up to this many items at *results
1178 *
1179 * Performs an index-ascending scan of the tree for present items. Places
1180 * their slots at *@results and returns the number of items which were
1181 * placed at *@results.
1182 *
1183 * The implementation is naive.
1184 *
1185 * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
1186 * be dereferenced with radix_tree_deref_slot, and if using only RCU
1187 * protection, radix_tree_deref_slot may fail requiring a retry.
1188 */
1189unsigned int
Hugh Dickins63286502011-08-03 16:21:18 -07001190radix_tree_gang_lookup_slot(struct radix_tree_root *root,
1191 void ***results, unsigned long *indices,
Nick Piggin47feff22008-07-25 19:45:29 -07001192 unsigned long first_index, unsigned int max_items)
1193{
Konstantin Khlebnikovcebbd292012-03-28 14:42:53 -07001194 struct radix_tree_iter iter;
1195 void **slot;
1196 unsigned int ret = 0;
Nick Piggin47feff22008-07-25 19:45:29 -07001197
Konstantin Khlebnikovcebbd292012-03-28 14:42:53 -07001198 if (unlikely(!max_items))
Nick Piggin47feff22008-07-25 19:45:29 -07001199 return 0;
1200
Konstantin Khlebnikovcebbd292012-03-28 14:42:53 -07001201 radix_tree_for_each_slot(slot, root, &iter, first_index) {
1202 results[ret] = slot;
Hugh Dickins63286502011-08-03 16:21:18 -07001203 if (indices)
Konstantin Khlebnikovcebbd292012-03-28 14:42:53 -07001204 indices[ret] = iter.index;
1205 if (++ret == max_items)
Nick Piggin47feff22008-07-25 19:45:29 -07001206 break;
Nick Piggin47feff22008-07-25 19:45:29 -07001207 }
1208
1209 return ret;
1210}
1211EXPORT_SYMBOL(radix_tree_gang_lookup_slot);

/**
 * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *                              based on a tag
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set.  Places the items at *@results and
 * returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_indirect_ptr(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
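
/*
 * Illustrative sketch, not part of this file: a batched scan of entries
 * carrying tag 0, in the style of a writeback pass over
 * PAGECACHE_TAG_DIRTY.  "my_tree" and "process_item" are hypothetical.
 *
 *	void *items[16];
 *	unsigned int i, nr;
 *
 *	rcu_read_lock();
 *	nr = radix_tree_gang_lookup_tag(&my_tree, items, 0, 16, 0);
 *	for (i = 0; i < nr; i++)
 *		process_item(items[i]);
 *	rcu_read_unlock();
 */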

/**
 * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *                                   radix tree based on a tag
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set.  Places the slots at *@results and
 * returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = slot;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
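
/*
 * Illustrative sketch, not part of this file: the slot variant suits
 * callers who want to replace tagged entries in place.  Under an
 * exclusive tree lock (here a hypothetical "my_tree_lock"), the returned
 * slots may be updated with radix_tree_replace_slot().  "old_item" and
 * "new_item" are likewise hypothetical.
 *
 *	void **slots[16];
 *	unsigned int i, nr;
 *
 *	spin_lock_irq(&my_tree_lock);
 *	nr = radix_tree_gang_lookup_tag_slot(&my_tree, slots, 0, 16, 0);
 *	for (i = 0; i < nr; i++) {
 *		if (radix_tree_deref_slot_protected(slots[i],
 *					&my_tree_lock) == old_item)
 *			radix_tree_replace_slot(slots[i], new_item);
 *	}
 *	spin_unlock_irq(&my_tree_lock);
 */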

#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
#include <linux/sched.h> /* for cond_resched() */

struct locate_info {
	unsigned long found_index;
	bool stop;
};

/*
 * This linear search is at present only useful to shmem_unuse_inode().
 */
static unsigned long __locate(struct radix_tree_node *slot, void *item,
			unsigned long index, struct locate_info *info)
{
	unsigned int shift;
	unsigned long i;

	shift = slot->shift + RADIX_TREE_MAP_SHIFT;

	do {
		shift -= RADIX_TREE_MAP_SHIFT;

		for (i = (index >> shift) & RADIX_TREE_MAP_MASK;
		     i < RADIX_TREE_MAP_SIZE;
		     i++, index += (1UL << shift)) {
			struct radix_tree_node *node =
					rcu_dereference_raw(slot->slots[i]);
			if (node == RADIX_TREE_RETRY)
				goto out;
			if (!radix_tree_is_indirect_ptr(node)) {
				if (node == item) {
					info->found_index = index;
					info->stop = true;
					goto out;
				}
				continue;
			}
			node = indirect_to_ptr(node);
			if (is_sibling_entry(slot, node))
				continue;
			slot = node;
			break;
		}
		if (i == RADIX_TREE_MAP_SIZE)
			break;
	} while (shift);

out:
	if ((index == 0) && (i == RADIX_TREE_MAP_SIZE))
		info->stop = true;
	return index;
}

/**
 * radix_tree_locate_item - search through radix tree for item
 * @root: radix tree root
 * @item: item to be found
 *
 * Returns index where item was found, or -1 if not found.
 * Caller must hold no lock (since this time-consuming function needs
 * to be preemptible), and must check afterwards if item is still there.
 */
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
	struct radix_tree_node *node;
	unsigned long max_index;
	unsigned long cur_index = 0;
	struct locate_info info = {
		.found_index = -1,
		.stop = false,
	};

	do {
		rcu_read_lock();
		node = rcu_dereference_raw(root->rnode);
		if (!radix_tree_is_indirect_ptr(node)) {
			rcu_read_unlock();
			if (node == item)
				info.found_index = 0;
			break;
		}

		node = indirect_to_ptr(node);

		max_index = node_maxindex(node);
		if (cur_index > max_index) {
			rcu_read_unlock();
			break;
		}

		cur_index = __locate(node, item, cur_index, &info);
		rcu_read_unlock();
		cond_resched();
	} while (!info.stop && cur_index <= max_index);

	return info.found_index;
}
#else
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
	return -1;
}
#endif /* CONFIG_SHMEM && CONFIG_SWAP */
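
/*
 * Illustrative sketch, not part of this file, of the recheck the comment
 * above asks for: after the unlocked search, take the lock that guards
 * modifications and verify the item is still at the returned index.
 * "my_tree", "my_tree_lock" and "do_something" are hypothetical.
 *
 *	unsigned long index;
 *
 *	index = radix_tree_locate_item(&my_tree, item);
 *	if (index == (unsigned long)-1)
 *		return;
 *	spin_lock_irq(&my_tree_lock);
 *	if (radix_tree_lookup(&my_tree, index) == item)
 *		do_something(item);
 *	spin_unlock_irq(&my_tree_lock);
 */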

/**
 * radix_tree_shrink - shrink radix tree to minimum height
 * @root: radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *to_free = root->rnode;
		struct radix_tree_node *slot;

		if (!radix_tree_is_indirect_ptr(to_free))
			break;
		to_free = indirect_to_ptr(to_free);

		/*
		 * If the candidate node has more than one child, or its
		 * child is not at the leftmost slot, or the child is a
		 * multiorder entry, we cannot shrink.
		 */
		if (to_free->count != 1)
			break;
		slot = to_free->slots[0];
		if (!slot)
			break;
		if (!radix_tree_is_indirect_ptr(slot) && to_free->shift)
			break;

		if (radix_tree_is_indirect_ptr(slot)) {
			slot = indirect_to_ptr(slot);
			slot->parent = NULL;
			slot = ptr_to_indirect(slot);
		}

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (to_free->slots[0]), it will be safe to dereference the new
		 * one (root->rnode) as far as dependent read barriers go.
		 */
		root->rnode = slot;

		/*
		 * We have a dilemma here.  The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item.  However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it.  If the item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again.  Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing a direct root node with an indirect
		 * pointer also results in a stale slot).  So tag the slot as
		 * indirect to force callers to retry.
		 */
		if (!radix_tree_is_indirect_ptr(slot))
			to_free->slots[0] = RADIX_TREE_RETRY;

		radix_tree_node_free(to_free);
		shrunk = true;
	}

	return shrunk;
}
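
/*
 * Illustrative sketch, not part of this file, of the lockless reader-side
 * retry that the RADIX_TREE_RETRY poisoning above depends on, in the
 * style of find_get_entry() in mm/filemap.c.  "my_tree" is hypothetical.
 *
 *	void **slot;
 *	void *item;
 *
 *	rcu_read_lock();
 * repeat:
 *	item = NULL;
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot) {
 *		item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item))
 *			goto repeat;
 *	}
 *	rcu_read_unlock();
 */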

/**
 * __radix_tree_delete_node - try to free node after clearing a slot
 * @root: radix tree root
 * @node: node containing @index
 *
 * After clearing the slot at @index in @node from radix tree
 * rooted at @root, call this function to attempt freeing the
 * node and shrinking the tree.
 *
 * Returns %true if @node was freed, %false otherwise.
 */
bool __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node == indirect_to_ptr(root->rnode))
				deleted |= radix_tree_shrink(root);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			root_tag_clear_all(root);
			root->rnode = NULL;
		}

		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}
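
/*
 * Illustrative sketch, not part of this file: the intended calling
 * pattern, in the style of the shadow-entry eviction in mm/workingset.c.
 * The caller clears a slot by hand (ignoring tags, unlike
 * radix_tree_delete_item() below) and then lets this function free any
 * nodes that became empty.  "my_tree" and "my_tree_lock" are hypothetical.
 *
 *	struct radix_tree_node *node;
 *	void **slot;
 *
 *	spin_lock_irq(&my_tree_lock);
 *	if (__radix_tree_lookup(&my_tree, index, &node, &slot) && node) {
 *		*slot = NULL;
 *		node->count--;
 *		__radix_tree_delete_node(&my_tree, node);
 *	}
 *	spin_unlock_irq(&my_tree_lock);
 */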

static inline void delete_sibling_entries(struct radix_tree_node *node,
					void *ptr, unsigned offset)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	int i;
	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[offset + i] != ptr)
			break;
		node->slots[offset + i] = NULL;
		node->count--;
	}
#endif
}

/**
 * radix_tree_delete_item - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 * @item: expected item
 *
 * Remove @item at @index from the radix tree rooted at @root.
 *
 * Returns the address of the deleted item, or NULL if it was not present
 * or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node;
	unsigned int offset;
	void **slot;
	void *entry;
	int tag;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!entry)
		return NULL;

	if (item && entry != item)
		return NULL;

	if (!node) {
		root_tag_clear_all(root);
		root->rnode = NULL;
		return entry;
	}

	offset = get_slot_offset(node, slot);

	/*
	 * Clear all tags associated with the item to be deleted.
	 * This way of doing it is inefficient, but seldom is any tag set.
	 */
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (tag_get(node, tag, offset))
			radix_tree_tag_clear(root, index, tag);
	}

	delete_sibling_entries(node, ptr_to_indirect(slot), offset);
	node->slots[offset] = NULL;
	node->count--;

	__radix_tree_delete_node(root, node);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);
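
/*
 * Illustrative sketch, not part of this file: passing the expected item
 * makes the delete conditional, closing the race between deciding to
 * remove an entry and another path replacing it meanwhile.  A NULL
 * return means someone else removed or replaced it first.  "my_tree",
 * "my_tree_lock", "expected" and "cleanup_item" are hypothetical.
 *
 *	void *old;
 *
 *	spin_lock_irq(&my_tree_lock);
 *	old = radix_tree_delete_item(&my_tree, index, expected);
 *	spin_unlock_irq(&my_tree_lock);
 *	if (old)
 *		cleanup_item(old);
 */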

/**
 * radix_tree_delete - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Remove the item at @index from the radix tree rooted at @root.
 *
 * Returns the address of the deleted item, or NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);

/**
 * radix_tree_tagged - test whether any items in the tree are tagged
 * @root: radix tree root
 * @tag: tag to test
 */
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
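
/*
 * Illustrative sketch, not part of this file: since this only inspects
 * the root tags, it is a cheap test for skipping a full tagged scan.
 * "my_tree" and "items" are hypothetical.
 *
 *	if (!radix_tree_tagged(&my_tree, 0))
 *		return 0;
 *	return radix_tree_gang_lookup_tag(&my_tree, items, 0, 16, 0);
 */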

static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}

static int radix_tree_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		rtp = &per_cpu(radix_tree_preloads, cpu);
		while (rtp->nr) {
			node = rtp->nodes;
			rtp->nodes = node->private_data;
			kmem_cache_free(radix_tree_node_cachep, node);
			rtp->nr--;
		}
	}
	return NOTIFY_OK;
}

void __init radix_tree_init(void)
{
	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	hotcpu_notifier(radix_tree_callback, 0);
}