/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct idr {
	struct radix_tree_root	idr_rt;
	unsigned int		idr_next;
};

/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users.  Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE	0

/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER		((__force gfp_t)(3 << __GFP_BITS_SHIFT))

#define IDR_INIT							\
{									\
	.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER)			\
}
#define DEFINE_IDR(name)	struct idr name = IDR_INIT

/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
	return READ_ONCE(idr->idr_next);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->idr_next, val);
}
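
/*
 * Illustrative sketch (not part of the API above): saving and later
 * restoring the cyclic allocator position.  The "my_idr" handle and
 * "saved" variable are hypothetical.
 *
 *	unsigned int saved;
 *
 *	saved = idr_get_cursor(&my_idr);
 *	...
 *	idr_set_cursor(&my_idr, saved);
 *
 * After the idr_set_cursor() call, the next idr_alloc_cyclic() tries
 * "saved" first (or searches upwards from it if that id is in use).
 */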
| 64 | |
| 65 | /** |
Randy Dunlap | 56083ab | 2010-10-26 14:19:08 -0700 | [diff] [blame] | 66 | * DOC: idr sync |
Nadia Derbey | f9c46d6 | 2008-07-25 01:48:01 -0700 | [diff] [blame] | 67 | * idr synchronization (stolen from radix-tree.h) |
| 68 | * |
| 69 | * idr_find() is able to be called locklessly, using RCU. The caller must |
| 70 | * ensure calls to this function are made within rcu_read_lock() regions. |
| 71 | * Other readers (lock-free or otherwise) and modifications may be running |
| 72 | * concurrently. |
| 73 | * |
| 74 | * It is still required that the caller manage the synchronization and |
| 75 | * lifetimes of the items. So if RCU lock-free lookups are used, typically |
| 76 | * this would mean that the items have their own locks, or are amenable to |
| 77 | * lock-free access; and that the items are freed by RCU (or only freed after |
| 78 | * having been deleted from the idr tree *and* a synchronize_rcu() grace |
| 79 | * period). |
| 80 | */ |
| 81 | |
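/*
 * Illustrative sketch of the lockless lookup described above; "my_idr",
 * "my_id" and struct my_obj are hypothetical, and the object must be
 * protected as explained in the DOC block (e.g. freed via RCU):
 *
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, my_id);
 *	if (obj)
 *		do_something_lockless(obj);
 *	rcu_read_unlock();
 */
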
void idr_preload(gfp_t gfp_mask);

int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
		  unsigned long start, unsigned long end, gfp_t gfp,
		  bool ext);

/**
 * idr_alloc - allocate an id
 * @idr: idr handle
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive)
 * @gfp: memory allocation flags
 *
 * Allocates an unused ID in the range [start, end).  Returns -ENOSPC
 * if there are no unused IDs in that range.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * Simultaneous modifications to the @idr are not allowed and should be
 * prevented by the user, usually with a lock.  idr_alloc() may be called
 * concurrently with read-only accesses to the @idr, such as idr_find() and
 * idr_for_each_entry().
 */
static inline int idr_alloc(struct idr *idr, void *ptr,
			    int start, int end, gfp_t gfp)
{
	unsigned long id;
	int ret;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;

	ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false);

	if (ret)
		return ret;

	return id;
}
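
/*
 * Illustrative sketch of the usual allocation pattern when the idr is
 * modified under a spinlock: preload outside the lock, then allocate with
 * GFP_NOWAIT inside it.  "my_lock", "my_idr" and "my_ptr" are hypothetical;
 * see idr_preload() for the authoritative description of this pattern.
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, my_ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 */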

static inline int idr_alloc_ext(struct idr *idr, void *ptr,
				unsigned long *index,
				unsigned long start,
				unsigned long end,
				gfp_t gfp)
{
	return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true);
}

int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ext(struct idr *idr, unsigned long *nextid);
void *idr_replace(struct idr *, void *, int id);
void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id);
void idr_destroy(struct idr *);

static inline void *idr_remove_ext(struct idr *idr, unsigned long id)
{
	return radix_tree_delete_item(&idr->idr_rt, id, NULL);
}

static inline void *idr_remove(struct idr *idr, int id)
{
	return idr_remove_ext(idr, id);
}

static inline void idr_init(struct idr *idr)
{
	INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
	idr->idr_next = 0;
}

static inline bool idr_is_empty(const struct idr *idr)
{
	return radix_tree_empty(&idr->idr_rt) &&
		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	preempt_enable();
}

/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or that %NULL was stored against
 * it with idr_alloc().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
static inline void *idr_find_ext(const struct idr *idr, unsigned long id)
{
	return radix_tree_lookup(&idr->idr_rt, id);
}

static inline void *idr_find(const struct idr *idr, int id)
{
	return idr_find_ext(idr, id);
}

/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idr: idr handle
 * @entry: the type * to use as cursor
 * @id: id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id)				\
	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
#define idr_for_each_entry_ext(idr, entry, id)				\
	for (id = 0; ((entry) = idr_get_next_ext(idr, &(id))) != NULL; ++id)
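
/*
 * Illustrative sketch of idr_for_each_entry(); "my_idr" and struct my_obj
 * are hypothetical.  The caller must keep the idr stable (or hold the
 * appropriate lock) for the duration of the walk.
 *
 *	struct my_obj *obj;
 *	int id;
 *
 *	idr_for_each_entry(&my_idr, obj, id)
 *		pr_info("id %d -> %p\n", id, obj);
 */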

/**
 * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
 * @idr: idr handle
 * @entry: the type * to use as cursor
 * @id: id entry's key
 *
 * Continue to iterate over entries of the given type, continuing after
 * the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id)			\
	for ((entry) = idr_get_next((idr), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idr), &(id)))
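
/*
 * Illustrative sketch of a walk resumed from a previously saved position
 * with idr_for_each_entry_continue(); "my_idr", struct my_obj, "resume_id"
 * and process() are hypothetical.  @id must hold the position to resume
 * from before entering the loop.
 *
 *	struct my_obj *obj;
 *	int id = resume_id;
 *
 *	idr_for_each_entry_continue(&my_idr, obj, id)
 *		process(obj);
 */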

/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS		(IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);

struct ida {
	struct radix_tree_root	ida_rt;
};

#define IDA_INIT	{						\
	.ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT),		\
}
#define DEFINE_IDA(name)	struct ida name = IDA_INIT

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);

int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);
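
/*
 * Illustrative sketch of the ida_simple_*() interface; "my_ida" is
 * hypothetical, and passing 0 as @end requests the full id range.
 *
 *	int id;
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);
 */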

static inline void ida_init(struct ida *ida)
{
	INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
}

/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
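
/*
 * Illustrative sketch of the low-level ida_pre_get()/ida_get_new() pattern
 * (most callers can use ida_simple_get() instead); "my_ida" is hypothetical.
 * ida_get_new() returns -EAGAIN when it runs out of preallocated memory, in
 * which case the caller preloads again and retries.
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		ret = ida_get_new(&my_ida, &id);
 *	} while (ret == -EAGAIN);
 *	if (ret)
 *		return ret;
 */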

static inline bool ida_is_empty(const struct ida *ida)
{
	return radix_tree_empty(&ida->ida_rt);
}
#endif /* __IDR_H__ */