// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
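
/*
 * Illustration (editor's note, not part of the original source): with
 * debugging enabled, a free 8-byte slab-backed element sits in memory as
 * seven POISON_FREE (0x6b) bytes followed by one POISON_END (0xa5) byte:
 *
 *	6b 6b 6b 6b 6b 6b 6b a5
 *
 * A buggy write to the element while it sits in the pool changes one of
 * these bytes, and the next remove_element() reports "BUG: mempool
 * element poison mismatch" via poison_error() above.
 */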

static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_poison_kfree(element, _RET_IP_);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_slab(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element);
	check_element(pool, element);
	return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool.  Unlike mempool_destroy(), this
 * does not free @pool itself.  This function only sleeps if the
 * free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);
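
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * because mempool_exit() tolerates a zeroed but uninitialized pool, a
 * teardown path may call it unconditionally on an embedded pool.  The
 * "my_dev" structure below is hypothetical.
 *
 *	struct my_dev {
 *		mempool_t pool;
 *	};
 *
 *	static void my_dev_release(struct my_dev *dev)
 *	{
 *		mempool_exit(&dev->pool);
 *		kfree(dev);
 *	}
 */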

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		      mempool_free_t *free_fn, void *pool_data,
		      gfp_t gfp_mask, int node_id)
{
	spin_lock_init(&pool->lock);
	pool->min_nr	= min_nr;
	pool->pool_data = pool_data;
	pool->alloc	= alloc_fn;
	pool->free	= free_fn;
	init_waitqueue_head(&pool->wait);

	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
		}
		add_element(pool, element);
	}

	return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
 * another structure).
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		 mempool_free_t *free_fn, void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);
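
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a typical embedded-pool setup pairs mempool_init() with the
 * mempool_exit() call shown earlier.  "my_cache" is a hypothetical
 * kmem_cache created elsewhere with a NULL constructor, as
 * mempool_alloc_slab() requires.
 *
 *	static int my_dev_setup(struct my_dev *dev, struct kmem_cache *my_cache)
 *	{
 *		return mempool_init(&dev->pool, 16, mempool_alloc_slab,
 *				    mempool_free_slab, my_cache);
 *	}
 */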

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * this function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
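
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a pool created with mempool_create() is torn down with
 * mempool_destroy().  Here the pool is backed by kmalloc()/kfree() for
 * hypothetical 256-byte objects.
 *
 *	mempool_t *pool;
 *
 *	pool = mempool_create(32, mempool_kmalloc, mempool_kfree,
 *			      (void *)(size_t)256);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	mempool_destroy(pool);
 */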

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
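
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * mempool_create_node() allocates the pool bookkeeping on a given NUMA
 * node and passes @gfp_mask to the setup-time element allocations;
 * where the elements themselves land depends on the alloc_fn.
 *
 *	pool = mempool_create_node(16, mempool_alloc_slab, mempool_free_slab,
 *				   my_cache, GFP_KERNEL, 0);
 */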

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
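
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a caller that learns it needs deeper reserves, e.g. after a new
 * device is added, can grow the guarantee; shrinking frees the excess
 * elements immediately.
 *
 *	err = mempool_resize(pool, 64);
 *	if (err)
 *		return err;
 */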

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
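
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * in an I/O submission path a caller typically passes GFP_NOIO, which
 * includes __GFP_DIRECT_RECLAIM, so the allocation may sleep but will
 * not fail.  "struct my_req" is a hypothetical element type.
 *
 *	struct my_req *req = mempool_alloc(pool, GFP_NOIO);
 *	...
 *	mempool_free(req, pool);
 */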

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
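
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a slab-backed pool passes the kmem_cache as pool_data.  The cache
 * must have been created without a constructor, which is what the
 * VM_BUG_ON(mem->ctor) above enforces.
 *
 *	pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
 *			      my_cache);
 *
 * The mempool_create_slab_pool() helper in <linux/mempool.h> wraps
 * this call.
 */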

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
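
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * for a kmalloc-backed pool the element size travels in pool_data as a
 * cast integer, so no backing structure is needed.
 *
 *	pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *			      (void *)(size_t)512);
 *
 * The mempool_create_kmalloc_pool() helper in <linux/mempool.h> wraps
 * this call.
 */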

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
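
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a page-backed pool stores the allocation order in pool_data and hands
 * out struct page pointers, so callers map or address the page
 * themselves.
 *
 *	pool = mempool_create(4, mempool_alloc_pages, mempool_free_pages,
 *			      (void *)(long)0);
 *	struct page *page = mempool_alloc(pool, GFP_NOIO);
 *	void *buf = page_address(page);
 *
 * The mempool_create_page_pool() helper in <linux/mempool.h> wraps
 * this call.
 */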