/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever, we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct kmem_cache *idr_layer_cache;
static DEFINE_SPINLOCK(simple_ida_lock);

/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_ID_SHIFT);

	return (1 << bits) - 1;
}
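
/*
 * Worked example (illustrative only, not part of the original file):
 * each layer multiplies the id space by 1 << IDR_BITS, capped at
 * MAX_ID_SHIFT.  Assuming the usual IDR_BITS of 6 on 64-bit builds
 * (5 on 32-bit), a tree of depth "layers" covers ids 0..idr_max(layers):
 *
 *	idr_max(1) == (1 << 6)  - 1 == 63
 *	idr_max(2) == (1 << 12) - 1 == 4095
 *	idr_max(3) == (1 << 18) - 1 == 262143
 */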

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinlocks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL + 1];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the id allocation function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
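
/*
 * Minimal usage sketch (illustrative only; "my_idr", "my_lock" and "obj"
 * are made-up caller-side names): idr_pre_get() refills the per-idr free
 * list without the caller's lock held, and the allocation proper runs
 * under that lock, looping back on -EAGAIN if the preallocation was
 * consumed in the meantime:
 *
 *	int id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new_above(&my_idr, obj, 1, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *
 *	return err ? err : id;
 */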

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * If allocation from IDR's private freelist fails, idr_get_new() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new() call.
 *
 * If the idr is full idr_get_new() will return %-ENOSPC.
 *
 * @id returns a value in the range %0 ... %0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
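
/*
 * Illustrative clean-up sketch only ("my_idr" and the object handling
 * are made-up caller-side names), following the sequence described in
 * the comment above:
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_one, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */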

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id > idr_max(p->layer + 1))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
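
/*
 * Illustrative lookup sketch only ("my_idr", struct my_obj and
 * my_obj_tryget() are made-up caller-side names): the RCU read lock keeps
 * the idr_layer nodes alive while the tree is walked, and the caller is
 * responsible for pinning the object itself before dropping the lock:
 *
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj && !my_obj_tryget(obj))
 *		obj = NULL;
 *	rcu_read_unlock();
 */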

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
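
/*
 * Illustrative only ("my_idr" is a made-up caller-side name): the @data
 * argument is handed through unchanged, so a callback can accumulate
 * state -- here, counting the registered pointers:
 *
 *	static int count_one(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	idr_for_each(&my_idr, count_one, &nr);
 */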

/**
 * idr_get_next - look up the next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id that is
 * equal to or greater than *@nextidp.  If an object is found, *@nextidp is
 * updated to its id so the caller can resume the iteration from there.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
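
/*
 * Illustrative iteration sketch only ("my_idr" and do_something() are
 * made-up caller-side names): an open-coded walk over every registered
 * pointer using idr_get_next(), starting at id 0 and stepping past each
 * hit, with rcu_read_lock() held or writers otherwise excluded:
 *
 *	void *p;
 *	int id;
 *
 *	for (id = 0; (p = idr_get_next(&my_idr, &id)) != NULL; id++)
 *		do_something(id, p);
 */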

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
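
/*
 * Illustrative only ("my_idr", "my_lock" and "new_obj" are made-up
 * caller-side names): swapping the object stored under an existing id
 * while holding the lock that serializes writers, then checking the
 * ERR_PTR-encoded return:
 *
 *	void *old;
 *
 *	spin_lock(&my_lock);
 *	old = idr_replace(&my_idr, new_obj, id);
 *	spin_unlock(&my_lock);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 */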

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
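
/*
 * Illustrative only: an idr is typically either defined statically with
 * the DEFINE_IDR() helper from <linux/idr.h> (assuming that helper is
 * present in this tree), or embedded in another structure and set up at
 * runtime with idr_init():
 *
 *	static DEFINE_IDR(my_idr);
 *
 *	struct my_driver {
 *		struct idr clients;
 *	};
 *
 *	idr_init(&drv->clients);
 */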

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
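
/*
 * Illustrative sketch only ("my_ida" and "my_lock" are made-up
 * caller-side names): the same preallocate/lock/retry idiom as for the
 * idr, using the ida variants:
 *
 *	int id, err;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new_above(&my_ida, 1, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *
 *	return err ? err : id;
 */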

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range %0 ... %0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < %0x80000000)
 * @end: the maximum id (exclusive, < %0x80000000 or %0 for no limit)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
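
/*
 * Illustrative only ("my_ida" is a made-up caller-side name): the simple
 * interface hides the preallocation and locking dance entirely --
 * allocate an id below 100, use it, then return it:
 *
 *	int id = ida_simple_get(&my_ida, 0, 100, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);
 */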

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);