/*
 * linux/mm/mempool.c
 *
 * memory buffer pool support. Such pools are mostly used
 * for guaranteed, deadlock-free memory allocations during
 * extreme VM load.
 *
 * started by Ingo Molnar, Copyright (C) 2001
 * debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
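
/*
 * While an element sits unused in the pool it is poisoned: bytes
 * 0..size-2 hold POISON_FREE and the last byte holds POISON_END; on
 * removal the element is refilled with POISON_INUSE. As a sketch,
 * assuming the usual constants from <linux/poison.h> (0x6b, 0xa5,
 * 0x5a), an 8-byte element waiting in the pool looks like:
 *
 *	6b 6b 6b 6b 6b 6b 6b a5
 *
 * Any other byte pattern means someone wrote to the element after
 * freeing it, and poison_error() reports the first mismatching byte.
 */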

static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_poison_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_slab(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool, gfp_t flags)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element, flags);
	check_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	while (pool->curr_nr) {
		void *element = remove_element(pool, GFP_KERNEL);

		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr: the minimum number of elements guaranteed to be
 *          allocated for this pool.
 * @alloc_fn: user-defined element-allocation function.
 * @free_fn: user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the
 * alloc_fn() and the free_fn() functions might sleep - as long as
 * mempool_alloc() is not called from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
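
/*
 * Example (an illustrative sketch, not part of this file): a driver
 * that must make forward progress under memory pressure can keep a
 * reserve of its request structures in a slab-backed pool. The names
 * struct my_req, my_req_cache and my_req_pool are hypothetical.
 *
 *	static struct kmem_cache *my_req_cache;
 *	static mempool_t *my_req_pool;
 *
 *	static int __init my_init(void)
 *	{
 *		my_req_cache = KMEM_CACHE(my_req, 0);
 *		if (!my_req_cache)
 *			return -ENOMEM;
 *		my_req_pool = mempool_create(16, mempool_alloc_slab,
 *					     mempool_free_slab, my_req_cache);
 *		if (!my_req_pool) {
 *			kmem_cache_destroy(my_req_cache);
 *			return -ENOMEM;
 *		}
 *		return 0;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		mempool_destroy(my_req_pool);
 *		kmem_cache_destroy(my_req_cache);
 *	}
 */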

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool, GFP_KERNEL);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
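
/*
 * Example (illustrative, reusing the hypothetical my_req_pool above):
 * grow the reserve when the number of devices that can have requests
 * in flight changes.
 *
 *	if (mempool_resize(my_req_pool, 16 * nr_devices) < 0)
 *		pr_warn("my_req_pool: could not grow reserve\n");
 */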

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool, gfp_temp);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round. If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule(). The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
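
/*
 * Example (illustrative, using the hypothetical my_req_pool above):
 * allocate a request on a write-out path. GFP_NOIO prevents recursion
 * into the I/O layer, and since it includes __GFP_DIRECT_RECLAIM the
 * call may wait for the reserve but never returns NULL.
 *
 *	struct my_req *req = mempool_alloc(my_req_pool, GFP_NOIO);
 *
 *	... fill in and submit req; on completion: ...
 *
 *	mempool_free(req, my_req_pool);
 */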

/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc(). The preceding read is
	 * for @element and the following @pool->curr_nr. This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element. This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards. If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;

	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
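
/*
 * Example (illustrative): a pool of eight 4KB buffers. The buffer size
 * travels through @pool_data as an integer cast to a pointer.
 *
 *	mempool_t *buf_pool = mempool_create(8, mempool_kmalloc,
 *					     mempool_kfree, (void *)4096);
 */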

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;

	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;

	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
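
/*
 * Example (illustrative): a pool of four order-1 (two-page) blocks;
 * the page order is passed through @pool_data the same way.
 *
 *	mempool_t *pg_pool = mempool_create(4, mempool_alloc_pages,
 *					    mempool_free_pages, (void *)1L);
 */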