/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever, we treat it as a (void *)) with that id.  You can pass
 * this id to a user for them to pass back at a later time.  You then
 * pass that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most
 * of the memory is returned (we keep IDR_FREE_MAX layer structures in
 * a local pool) so we don't need to go to the memory "store" during
 * an id allocate, and you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct kmem_cache *idr_layer_cache;
static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_ID_SHIFT);

	return (1 << bits) - 1;
}

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinning locks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
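
/*
 * A minimal sketch of the preallocate-then-allocate pattern described above
 * (illustrative only; "my_idr", "my_lock" and "obj" are hypothetical
 * caller-side names, not part of this file):
 *
 *	int id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new(&my_idr, obj, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *	if (err)
 *		return err;		(the idr is full: -ENOSPC)
 *
 * idr_pre_get() itself runs without the caller's lock so that a GFP_KERNEL
 * allocation is allowed to sleep; only the idr_get_new() step needs the
 * caller's serialization.
 */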
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available, go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL + 1];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * If allocation from IDR's private freelist fails, idr_get_new() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new() call.
 *
 * If the idr is full idr_get_new() will return %-ENOSPC.
 *
 * @id returns a value in the range %0 ... %0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && ! --((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
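
/*
 * A sketch of the typical clean-up sequence described above (hypothetical
 * "my_idr"; the callback simply frees each stored object):
 *
 *	static int free_cb(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_cb, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */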

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
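
/*
 * Lookup sketch (hypothetical "my_idr"): a reader that guarantees the
 * lifetime of the stored objects by other means can do the lookup under
 * rcu_read_lock() instead of taking the writer's lock:
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		do_something(obj);	(hypothetical helper)
 *	rcu_read_unlock();
 */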

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
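
/*
 * Iteration sketch (hypothetical names): the callback receives every
 * allocated id and its pointer; returning non-zero stops the walk and that
 * value is passed back to the caller.
 *
 *	static int count_cb(int id, void *p, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&my_idr, count_cb, &count);
 */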

/**
 * idr_get_next - lookup next object above or equal to the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * greater than or equal to *@nextidp.  After the lookup, *@nextidp is
 * updated to that id, ready for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
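
/*
 * Cursor-style iteration sketch using idr_get_next() (hypothetical
 * "my_idr"): no callback is needed, the caller advances the id itself.
 *
 *	int id = 0;
 *	void *obj;
 *
 *	while ((obj = idr_get_next(&my_idr, &id)) != NULL) {
 *		... use obj ...
 *		id++;		(step past the entry just returned)
 *	}
 */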


/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
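
/*
 * Replacement sketch (hypothetical names): idr_replace() hands back the old
 * pointer on success or an ERR_PTR() value, so check the result with
 * IS_ERR() before using it:
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	(-EINVAL or -ENOENT)
 *	kfree(old);			(assuming the old object is now unused)
 */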

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
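
/*
 * A minimal ida allocation sketch mirroring the idr pattern above
 * (hypothetical "my_ida" and "my_lock"; the caller provides the locking):
 *
 *	int id, err;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new_above(&my_ida, 1, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *	if (err)
 *		return err;		(the ida is full: -ENOSPC)
 *	...
 *	spin_lock(&my_lock);
 *	ida_remove(&my_ida, id);
 *	spin_unlock(&my_lock);
 */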

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range %0 ... %0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
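
/*
 * Callers that do not need to provide their own locking can use the simple
 * interface above instead (sketch; "my_ida" is hypothetical):
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;		(-ENOMEM or -ENOSPC)
 *	...
 *	ida_simple_remove(&my_ida, id);
 */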

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);