/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *).  You can pass
 * this id to a user for him to pass back at a later time.  You then
 * pass that id to this code and it returns your pointer.
 *
 * You can release ids at any time. When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers in a local pool)
 * so we don't need to go to the memory "store" during an id allocate,
 * and so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
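
/*
 * An illustration of the layout (a sketch; assumes a 32-bit kernel,
 * where IDR_BITS is 5 and each idr_layer thus holds IDR_SIZE = 32
 * slots): a two-layer tree covers ids 0..1023, and id 100 decomposes
 * as
 *
 *	top index  = (100 >> IDR_BITS) & IDR_MASK = 3
 *	leaf index =  100              & IDR_MASK = 4
 *
 * so the user pointer lives at idp->top->ary[3]->ary[4].
 */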

#ifndef TEST                        // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions. It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
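
/*
 * A minimal usage sketch of the pre_get/get_new pair (hypothetical
 * caller code; my_idr, my_lock and obj are assumptions, not part of
 * this file):
 *
 *	int id, err;
 * again:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = idr_get_new(&my_idr, obj, &id);
 *	spin_unlock(&my_lock);
 *	if (err == -EAGAIN)
 *		goto again;
 *
 * The retry is needed because another task may consume the layers
 * preallocated here between idr_pre_get() and idr_get_new().
 */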

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * Walk down the tree until we reach the leaf node.
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available; go back to the previous layer */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
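
/*
 * Example (hypothetical caller code): allocating ids starting at 1 so
 * that 0 can serve as an "invalid id" sentinel.  Assumes the caller
 * already ran the idr_pre_get() loop shown above:
 *
 *	err = idr_get_new_above(&my_idr, obj, 1, &id);
 *	if (!err)
 *		pr_debug("allocated id %d\n", id);
 */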

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);
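
/*
 * Example (hypothetical caller code): removal must be serialized with
 * other writers by the caller.  If lookups run under rcu_read_lock(),
 * free the object only after a grace period:
 *
 *	spin_lock(&my_lock);
 *	obj = idr_find(&my_idr, id);
 *	idr_remove(&my_idr, id);
 *	spin_unlock(&my_lock);
 *	if (obj) {
 *		synchronize_rcu();
 *		kfree(obj);
 *	}
 */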

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
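
/*
 * The clean-up sequence described above, as a sketch (hypothetical
 * caller code; free_fn is an assumed helper, not part of this file):
 *
 *	static int free_fn(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_fn, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */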

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
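
/*
 * Example (hypothetical caller code): an RCU-protected lookup.  The
 * object must stay valid for at least a grace period after its id is
 * removed, or the pointer returned here could point to freed memory:
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use(obj);	(no sleeping inside the RCU section)
 *	rcu_read_unlock();
 */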

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
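
/*
 * Example (hypothetical caller code): counting registered pointers,
 * with @data carrying the accumulator:
 *
 *	static int count_fn(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&my_idr, count_fn, &count);
 */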

/**
 * idr_get_next - lookup next object above or equal to the given id
 * @idp: idr handle
 * @nextidp: pointer to the lookup key
 *
 * Returns a pointer to the registered object with the smallest id that
 * is equal to or greater than *@nextidp, updating *@nextidp to that id,
 * or %NULL if no such object exists.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	n = idp->layers * IDR_BITS;
	max = 1 << n;
	p = rcu_dereference(idp->top);
	if (!p)
		return NULL;

	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
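
/*
 * Example (hypothetical caller code): iterating all entries without a
 * callback, the pattern idr_get_next() enables:
 *
 *	int id = 0;
 *	void *obj;
 *
 *	while ((obj = idr_get_next(&my_idr, &id)) != NULL) {
 *		use(obj);
 *		id++;	(advance past the entry just returned)
 *	}
 */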

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
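
/*
 * Example (hypothetical caller code): the return value is either the
 * old pointer or an ERR_PTR() and must be checked with IS_ERR():
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	(if readers use RCU, free old only after a grace period)
 */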

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
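
/*
 * Statically allocated idrs are normally declared with the DEFINE_IDR()
 * macro from <linux/idr.h> rather than initialized at run time:
 *
 *	static DEFINE_IDR(my_idr);
 */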

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
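
/*
 * A minimal usage sketch (hypothetical caller code), mirroring the idr
 * retry loop above:
 *
 *	int id, err;
 * again:
 *	if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = ida_get_new(&my_ida, &id);
 *	spin_unlock(&my_lock);
 *	if (err == -EAGAIN)
 *		goto again;
 */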

/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:		ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);