// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_poison_kfree(element, _RET_IP_);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_slab(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool, gfp_t flags)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element, flags);
	check_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	while (pool->curr_nr) {
		void *element = remove_element(pool, GFP_KERNEL);

		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the alloc_fn()
 * and the free_fn() functions might sleep - as long as mempool_alloc() is
 * not called from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
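
/*
 * Usage sketch (illustrative): a driver typically builds a pool on top of
 * its own kmem_cache so that a minimum number of objects stays allocatable
 * under memory pressure. "io_unit_cache", "io_pool" and MIN_IO_UNITS below
 * are hypothetical names; the equivalent convenience wrapper
 * mempool_create_slab_pool() is declared in <linux/mempool.h>.
 *
 *	io_pool = mempool_create(MIN_IO_UNITS, mempool_alloc_slab,
 *				 mempool_free_slab, io_unit_cache);
 *	if (!io_pool)
 *		return -ENOMEM;
 *	...
 *	mempool_destroy(io_pool);
 */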

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool, GFP_KERNEL);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
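
/*
 * Usage sketch (illustrative): a caller whose worst-case reserve requirement
 * changes at runtime can resize the pool. "io_pool" and "new_depth" are
 * hypothetical names.
 *
 *	if (mempool_resize(io_pool, new_depth))
 *		pr_warn("could not grow io_pool reserve\n");
 */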

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool, gfp_temp);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round. If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule(). The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
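
/*
 * Usage sketch (illustrative): callers on the writeback/block-I/O path
 * typically pass GFP_NOIO so the underlying allocation cannot recurse into
 * the I/O stack. Because GFP_NOIO includes __GFP_DIRECT_RECLAIM, the call
 * below may sleep but will not return NULL. "io_pool" and "struct io_unit"
 * are hypothetical names.
 *
 *	struct io_unit *iou = mempool_alloc(io_pool, GFP_NOIO);
 */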

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
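
/*
 * Usage sketch (illustrative): elements are always released through
 * mempool_free(), which refills the reserve first and only hands the
 * element to free_fn() once the pool holds min_nr elements again. The
 * names below are hypothetical.
 *
 *	static void io_unit_done(struct io_unit *iou)
 *	{
 *		mempool_free(iou, io_pool);
 *	}
 */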

/*
 * Commonly used alloc and free functions, backed by the kmem_cache passed
 * in via pool_data.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * Commonly used alloc and free functions that kmalloc()/kfree() the amount
 * of memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;

	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
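
/*
 * Usage sketch (illustrative): a fixed-size kmalloc-backed pool passes the
 * element size through pool_data; the mempool_create_kmalloc_pool() helper
 * in <linux/mempool.h> wraps exactly this call. The size and count below
 * are hypothetical.
 *
 *	mempool_t *buf_pool = mempool_create(16, mempool_kmalloc,
 *					     mempool_kfree, (void *)512);
 */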

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;

	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;

	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
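
/*
 * Usage sketch (illustrative): a page-backed pool encodes the allocation
 * order in pool_data; the mempool_create_page_pool() helper in
 * <linux/mempool.h> wraps this call. Order 0 (single pages) and the count
 * below are hypothetical.
 *
 *	mempool_t *page_pool = mempool_create(32, mempool_alloc_pages,
 *					      mempool_free_pages, (void *)0);
 */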