/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers) in a local
 * pool so we don't need to go to the memory "store" during an id
 * allocate, and so you don't need to be too concerned about locking and
 * conflicts with the slab allocator.
 */

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
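
/*
 * Example: a minimal allocation sketch (not taken from this file; my_idr,
 * my_lock and my_ptr are hypothetical).  This retry loop is the usual way
 * callers pair idr_pre_get() with the idr_get_new*() functions defined
 * further below.
 *
 *	int id, ret;
 *
 * again:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	ret = idr_get_new(&my_idr, my_ptr, &id);
 *	spin_unlock(&my_lock);
 *	if (ret == -EAGAIN)
 *		goto again;
 *	else if (ret)
 *		return ret;
 */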

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
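
/*
 * Example: the typical tear-down sequence described above (a sketch;
 * my_idr and the freed objects are hypothetical).  Objects are freed via
 * an idr_for_each() callback, then the ids and the cached layers are
 * released.
 *
 *	static int free_my_obj(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_my_obj, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */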

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
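
/*
 * Example: an RCU-protected lookup (a sketch; my_idr and struct my_obj
 * with its kref member are hypothetical).  The caller must keep the
 * object alive past rcu_read_unlock(), e.g. by taking a reference
 * inside the critical section.
 *
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		kref_get(&obj->ref);
 *	rcu_read_unlock();
 */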

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
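
/*
 * Example: counting the registered pointers (a sketch; my_idr is
 * hypothetical).  A non-zero return from the callback would stop the
 * walk early; here it always returns 0 so every entry is visited.
 *
 *	static int count_one(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&my_idr, count_one, &count);
 */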

/**
 * idr_get_next - lookup the next registered object, starting at a given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * equal to or greater than *@nextidp, and updates *@nextidp to that id.
 */

void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * An ERR_PTR(-ENOENT) return indicates that @id was not found.
 * An ERR_PTR(-EINVAL) return indicates that @id was not within valid
 * constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
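
/*
 * Example: swapping in a new object (a sketch; my_idr, new_obj and
 * struct my_obj are hypothetical).  The return value must be checked
 * with IS_ERR() before it is used as the old pointer.
 *
 *	struct my_obj *old;
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	kfree(old);
 */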

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
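
/*
 * Example: setting up an idr (a sketch; my_idr is hypothetical).  A
 * statically allocated handle can instead use the DEFINE_IDR() macro
 * from <linux/idr.h>.
 *
 *	struct idr my_idr;
 *
 *	idr_init(&my_idr);
 *
 * or, statically:
 *
 *	static DEFINE_IDR(my_idr);
 */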


/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
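
/*
 * Example: the usual ida allocation loop (a sketch; my_ida and my_lock
 * are hypothetical), mirroring the idr_pre_get()/idr_get_new() pattern
 * shown earlier.
 *
 *	int id, ret;
 *
 * again:
 *	if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	ret = ida_get_new(&my_ida, &id);
 *	spin_unlock(&my_lock);
 *	if (ret == -EAGAIN)
 *		goto again;
 *	else if (ret)
 *		return ret;
 */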

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);