#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
static DEFINE_SPINLOCK(simple_ida_lock);

int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
		  unsigned long start, unsigned long end, gfp_t gfp,
		  bool ext)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
		return -EINVAL;

	radix_tree_iter_init(&iter, start);
	if (ext)
		slot = idr_get_free_ext(&idr->idr_rt, &iter, gfp, end);
	else
		slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	if (index)
		*index = iter.index;
	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_cmn);

/**
 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 * @idr: idr handle
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive)
 * @gfp: memory allocation flags
 *
 * Allocates an ID larger than the last ID allocated if one is available.
 * If not, it will attempt to allocate the smallest ID that is greater than
 * or equal to @start.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	int id, curr = idr->idr_next;

	if (curr < start)
		curr = start;

	id = idr_alloc(idr, ptr, curr, end, gfp);
	if ((id == -ENOSPC) && (curr > start))
		id = idr_alloc(idr, ptr, start, curr, gfp);

	if (id >= 0)
		idr->idr_next = id + 1U;

	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);

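/*
 * Example: cyclic ID allocation for a hypothetical connection table.
 * A minimal sketch, not part of this file; 'conn_idr', 'struct conn'
 * and the caller's serialisation of allocations are assumed.
 *
 *	static DEFINE_IDR(conn_idr);
 *
 *	int conn_register(struct conn *c)
 *	{
 *		// end == 0 means no upper limit on the allocated ID
 *		int id = idr_alloc_cyclic(&conn_idr, c, 0, 0, GFP_KERNEL);
 *		if (id < 0)
 *			return id;
 *		c->id = id;
 *		return 0;
 *	}
 */
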
/**
 * idr_for_each - iterate through all stored pointers
 * @idr: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed to callback function
 *
 * The callback function will be called for each entry in @idr, passing
 * the id, the pointer and the data pointer passed to this function.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU. Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret = fn(iter.index, rcu_dereference_raw(*slot), data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);

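/*
 * Example: counting entries with idr_for_each(). A sketch only;
 * 'conn_idr' and 'conn_count_one' are illustrative names.
 *
 *	static int conn_count_one(int id, void *p, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;	// a non-zero return would stop the walk
 *	}
 *
 *	unsigned int conn_count(void)
 *	{
 *		unsigned int count = 0;
 *
 *		idr_for_each(&conn_idr, conn_count_one, &count);
 *		return count;
 *	}
 */
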
/**
 * idr_get_next - Find next populated entry
 * @idr: idr handle
 * @nextid: Pointer to lowest possible ID to return
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid. On exit, @nextid is updated
 * to the ID of the found value. To use in a loop, the value pointed to by
 * @nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
	if (!slot)
		return NULL;

	*nextid = iter.index;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next);

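/*
 * Example: walking all entries with idr_get_next(). A sketch only;
 * this is the pattern that the idr_for_each_entry() macro wraps up,
 * and 'conn_idr' and 'process' are placeholders.
 *
 *	struct conn *c;
 *	int id;
 *
 *	for (id = 0; (c = idr_get_next(&conn_idr, &id)) != NULL; id++)
 *		process(c);
 */
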
void *idr_get_next_ext(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
	if (!slot)
		return NULL;

	*nextid = iter.index;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next_ext);

/**
 * idr_replace - replace pointer for given id
 * @idr: idr handle
 * @ptr: New pointer to associate with the ID
 * @id: Lookup key
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success. %-ENOENT indicates that @id was not
 * found. %-EINVAL indicates that @id or @ptr were not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, int id)
{
	if (id < 0)
		return ERR_PTR(-EINVAL);

	return idr_replace_ext(idr, ptr, id);
}
EXPORT_SYMBOL(idr_replace);

void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
		return ERR_PTR(-EINVAL);

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL);

	return entry;
}
EXPORT_SYMBOL(idr_replace_ext);

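/*
 * Example: swapping in a new pointer for an existing ID. A sketch;
 * note that failure is reported via ERR_PTR(), not NULL. 'conn_idr',
 * 'new_conn' and 'id' are assumed to exist in the caller.
 *
 *	struct conn *old;
 *
 *	old = idr_replace(&conn_idr, new_conn, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// -ENOENT or -EINVAL
 *	kfree(old);
 */
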
/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer. As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR. To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()). To allocate a new ID, call
 * ida_simple_get(). To free an ID, call ida_simple_remove().
 *
 * If you have more complex locking requirements, use a loop around
 * ida_pre_get() and ida_get_new() to allocate a new ID. Then use
 * ida_remove() to free an ID. You must make sure that ida_get_new() and
 * ida_remove() cannot be called at the same time as each other for the
 * same IDA.
 *
 * You can also use ida_get_new_above() if you need an ID to be allocated
 * above a particular number. ida_destroy() can be used to dispose of an
 * IDA without needing to free the individual IDs in it. You can use
 * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
 *
 * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */

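/*
 * Example: the simple IDA pattern described above. A minimal sketch;
 * 'sess_ida' and 'struct sess' are illustrative names.
 *
 *	static DEFINE_IDA(sess_ida);
 *
 *	int sess_init(struct sess *s)
 *	{
 *		int id = ida_simple_get(&sess_ida, 0, 0, GFP_KERNEL);
 *		if (id < 0)
 *			return id;
 *		s->id = id;
 *		return 0;
 *	}
 *
 *	void sess_exit(struct sess *s)
 *	{
 *		ida_simple_remove(&sess_ida, s->id);
 *	}
 */
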
/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the IDR & radix tree to store
 * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
 * free, unlike the IDR where it means at least one entry is free.
 *
 * I considered telling the radix tree that each slot is an order-10 node
 * and storing the bit numbers in the radix tree, but the radix tree can't
 * allow a single multiorder entry at index 0, which would significantly
 * increase memory consumption for the IDA. So instead we divide the index
 * by the number of bits in the leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
 * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
 * directly in the entry. By being really tricksy, we could store
 * BITS_PER_LONG - 1 bits, but there are diminishing returns after optimising
 * for 0-3 allocated IDs.
 *
 * We allow the radix tree 'exceptional' count to get out of date. Nothing
 * in the IDA nor the radix tree code checks it. If it becomes important
 * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
 * calls to radix_tree_iter_replace() which will correct the exceptional
 * count.
 *
 * The IDA always requires a lock to alloc/free. If we add a 'test_bit'
 * equivalent, it will still need locking. Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */

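/*
 * To illustrate the index split described above: a 128-byte leaf bitmap
 * gives IDA_BITMAP_BITS == 1024, so a worked example (not part of the
 * code) for id 2500 is:
 *
 *	index = 2500 / IDA_BITMAP_BITS;	// radix tree index 2
 *	bit   = 2500 % IDA_BITMAP_BITS;	// bit 452 within that leaf
 */
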
#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS)

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @start: id to start search at
 * @id: pointer to the allocated handle
 *
 * Allocate a new ID above or equal to @start. It should be called
 * with any required locks held to ensure that concurrent calls to
 * ida_get_new_above() / ida_get_new() / ida_remove() are not possible.
 * Consider using ida_simple_get() if you do not have complex locking
 * requirements.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call. If the ida is full, it will
 * return %-ENOSPC. On success, it will return 0.
 *
 * @id returns a value in the range @start ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int start, int *id)
{
	struct radix_tree_root *root = &ida->ida_rt;
	void __rcu **slot;
	struct radix_tree_iter iter;
	struct ida_bitmap *bitmap;
	unsigned long index;
	unsigned bit, ebit;
	int new;

	index = start / IDA_BITMAP_BITS;
	bit = start % IDA_BITMAP_BITS;
	ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;

	slot = radix_tree_iter_init(&iter, index);
	for (;;) {
		if (slot)
			slot = radix_tree_next_slot(slot, &iter,
						RADIX_TREE_ITER_TAGGED);
		if (!slot) {
			slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
			if (IS_ERR(slot)) {
				if (slot == ERR_PTR(-ENOMEM))
					return -EAGAIN;
				return PTR_ERR(slot);
			}
		}
		if (iter.index > index) {
			bit = 0;
			ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
		}
		new = iter.index * IDA_BITMAP_BITS;
		bitmap = rcu_dereference_raw(*slot);
		if (radix_tree_exception(bitmap)) {
			unsigned long tmp = (unsigned long)bitmap;
			ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
			if (ebit < BITS_PER_LONG) {
				tmp |= 1UL << ebit;
				rcu_assign_pointer(*slot, (void *)tmp);
				*id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
				return 0;
			}
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			if (!bitmap)
				return -EAGAIN;
			memset(bitmap, 0, sizeof(*bitmap));
			bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
			rcu_assign_pointer(*slot, bitmap);
		}

		if (bitmap) {
			bit = find_next_zero_bit(bitmap->bitmap,
							IDA_BITMAP_BITS, bit);
			new += bit;
			if (new < 0)
				return -ENOSPC;
			if (bit == IDA_BITMAP_BITS)
				continue;

			__set_bit(bit, bitmap->bitmap);
			if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
				radix_tree_iter_tag_clear(root, &iter,
								IDR_FREE);
		} else {
			new += bit;
			if (new < 0)
				return -ENOSPC;
			if (ebit < BITS_PER_LONG) {
				bitmap = (void *)((1UL << ebit) |
						RADIX_TREE_EXCEPTIONAL_ENTRY);
				radix_tree_iter_replace(root, &iter, slot,
						bitmap);
				*id = new;
				return 0;
			}
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			if (!bitmap)
				return -EAGAIN;
			memset(bitmap, 0, sizeof(*bitmap));
			__set_bit(bit, bitmap->bitmap);
			radix_tree_iter_replace(root, &iter, slot, bitmap);
		}

		*id = new;
		return 0;
	}
}
EXPORT_SYMBOL(ida_get_new_above);

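/*
 * Example: the ida_pre_get()/ida_get_new_above() retry loop described
 * in the kernel-doc above, under a caller-supplied lock. A sketch only;
 * 'my_ida' and 'my_lock' are assumed to exist in the caller.
 *
 *	int alloc_id_above(int start, int *id)
 *	{
 *		int err;
 *
 *		do {
 *			if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *				return -ENOMEM;
 *			spin_lock(&my_lock);
 *			err = ida_get_new_above(&my_ida, start, id);
 *			spin_unlock(&my_lock);
 *		} while (err == -EAGAIN);
 *
 *		return err;
 *	}
 */
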
/**
 * ida_remove - Free the given ID
 * @ida: ida handle
 * @id: ID to free
 *
 * This function should not be called at the same time as ida_get_new_above().
 */
void ida_remove(struct ida *ida, int id)
{
	unsigned long index = id / IDA_BITMAP_BITS;
	unsigned offset = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long *btmp;
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
	if (!slot)
		goto err;

	bitmap = rcu_dereference_raw(*slot);
	if (radix_tree_exception(bitmap)) {
		btmp = (unsigned long *)slot;
		offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
		if (offset >= BITS_PER_LONG)
			goto err;
	} else {
		btmp = bitmap->bitmap;
	}
	if (!test_bit(offset, btmp))
		goto err;

	__clear_bit(offset, btmp);
	radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
	if (radix_tree_exception(bitmap)) {
		if (rcu_dereference_raw(*slot) ==
				(void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
			radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	} else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
		kfree(bitmap);
		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	}
	return;
 err:
	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - Free the contents of an ida
 * @ida: ida handle
 *
 * Calling this function releases all resources associated with an IDA. When
 * this call returns, the IDA is empty and can be reused or freed. The caller
 * should not allow ida_remove() or ida_get_new_above() to be called at the
 * same time.
 */
void ida_destroy(struct ida *ida)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
		struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
		if (!radix_tree_exception(bitmap))
			kfree(bitmap);
		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	}
}
EXPORT_SYMBOL(ida_destroy);

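/*
 * Example: bulk teardown with ida_destroy() instead of freeing each ID
 * individually. A sketch; 'sess_ida' is the hypothetical IDA from the
 * examples above.
 *
 *	static void __exit sess_module_exit(void)
 *	{
 *		// All users are gone by now, so no locking is needed
 *		ida_destroy(&sess_ida);
 *	}
 */
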
/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Compared to ida_get_new_above() this function does its own locking, and
 * should be used unless there are special requirements.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 *
 * Use to release an id allocated with ida_simple_get().
 *
 * Compared to ida_remove() this function does its own locking, and should be
 * used unless there are special requirements.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
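
/*
 * Example: allocating from a bounded range with ida_simple_get(). A
 * sketch; 'minor_ida' is illustrative and the range is arbitrary.
 *
 *	static DEFINE_IDA(minor_ida);
 *
 *	// IDs satisfy 0 <= id < 256; -ENOSPC once the range is exhausted
 *	int minor = ida_simple_get(&minor_ida, 0, 256, GFP_KERNEL);
 *	if (minor < 0)
 *		return minor;
 *	...
 *	ida_simple_remove(&minor_ida, minor);
 */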