/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>

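/*
 * Internal helpers: push/pop a preallocated element on the reserve
 * stack.  Callers must have exclusive access to @pool, either by
 * holding pool->lock or because the pool is not yet (or no longer)
 * visible to other users.
 */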
static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	BUG_ON(pool->curr_nr <= 0);
	return pool->elements[--pool->curr_nr];
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool.  The pool can be used from the mempool_alloc() and
 * mempool_free() functions.  This function might sleep.  Both alloc_fn()
 * and free_fn() might sleep as well, as long as mempool_alloc() is never
 * called from IRQ context.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
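
/*
 * Illustrative usage sketch (not part of this file): a block driver that
 * must make forward progress under memory pressure can reserve a handful
 * of request structures from a slab cache.  The names req_cachep and
 * MIN_POOL_REQS below are hypothetical.
 *
 *	mempool_t *req_pool;
 *
 *	req_pool = mempool_create(MIN_POOL_REQS, mempool_alloc_slab,
 *				  mempool_free_slab, req_cachep);
 *	if (!req_pool)
 *		return -ENOMEM;
 */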
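/*
 * mempool_create_node - like mempool_create(), but the pool bookkeeping
 * and the element array are allocated on NUMA node @node_id, and
 * @gfp_mask is used both for that bookkeeping and for the initial
 * element preallocation via alloc_fn().
 */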
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;
	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 * @gfp_mask:   the usual allocation bitmask.
 *
 * This function shrinks/grows the pool.  When growing, it cannot be
 * guaranteed that the pool will reach the new size immediately, but
 * subsequent mempool_free() calls will refill it.
 *
 * Note: the caller must guarantee that mempool_destroy() is not called
 * while this function is running.  mempool_alloc() and mempool_free()
 * may be called (e.g. from IRQ context) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(gfp_mask, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
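
/*
 * Illustrative sketch (hypothetical names, continuing the req_pool
 * example above): a driver that learns at runtime it needs a deeper
 * reserve can grow its pool; shrinking frees the excess immediately.
 *
 *	if (mempool_resize(req_pool, MIN_POOL_REQS * 2, GFP_KERNEL))
 *		pr_warn("could not grow request pool\n");
 */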

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:     pointer to the memory pool which was allocated via
 *            mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL.  Note that due to preallocation, this function
 * *never* fails when called from process contexts.  (It might
 * fail if called from an IRQ context.)
 *
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o __GFP_WAIT or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_WAIT */
	if (!(gfp_mask & __GFP_WAIT)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
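
/*
 * Illustrative sketch (hypothetical names, reusing req_pool from above):
 * allocating from the pool in a process-context I/O path.  With a
 * __GFP_WAIT-bearing mask such as GFP_NOIO this cannot fail; the matching
 * mempool_free() refills the reserve before falling back to free_fn().
 *
 *	struct request *rq = mempool_alloc(req_pool, GFP_NOIO);
 *
 *	... use rq ...
 *
 *	mempool_free(rq, req_pool);
 */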

/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool:    pointer to the memory pool which was allocated via
 *           mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of
 * memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
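
/*
 * Illustrative sketch: for the kmalloc helpers, pool_data carries the
 * allocation size itself, cast to a pointer.  A pool of 16 guaranteed
 * 256-byte buffers (hypothetical sizing):
 *
 *	mempool_t *buf_pool = mempool_create(16, mempool_kmalloc,
 *					     mempool_kfree, (void *)256);
 */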

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
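
/*
 * Illustrative sketch: for the page helpers, pool_data carries the page
 * order, again cast to a pointer.  A reserve of 8 single (order-0) pages:
 *
 *	mempool_t *page_pool = mempool_create(8, mempool_alloc_pages,
 *					      mempool_free_pages,
 *					      (void *)(long)0);
 */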