/*
 *  linux/mm/mempool.c
 *
 *  Memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  Started by Ingo Molnar, Copyright (C) 2001
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
/* Push an element onto the pool's reserve stack. */
static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	pool->elements[pool->curr_nr++] = element;
}

/* Pop an element from the pool's reserve stack. */
static void *remove_element(mempool_t *pool)
{
	BUG_ON(pool->curr_nr <= 0);
	return pool->elements[--pool->curr_nr];
}

/* Release all reserved elements, the element array and the pool itself. */
static void free_pool(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}

/**
 * mempool_create - create a memory pool
 * @min_nr: the minimum number of elements guaranteed to be
 *          allocated for this pool.
 * @alloc_fn: user-defined element-allocation function.
 * @free_fn: user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both alloc_fn()
 * and free_fn() may also sleep, as long as mempool_alloc() is never
 * called from IRQ context.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	mempool_t *pool;

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;
	memset(pool, 0, sizeof(*pool));
	pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (unlikely(!element)) {
			free_pool(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create);
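
/*
 * Example usage (an illustrative sketch only; "io_request_cache",
 * "io_request_pool" and IO_POOL_MIN are hypothetical names, not part
 * of this file): a pool backed by a slab cache, using the slab helpers
 * defined at the bottom of this file as the alloc/free pair.
 *
 *	static kmem_cache_t *io_request_cache;
 *	static mempool_t *io_request_pool;
 *
 *	io_request_pool = mempool_create(IO_POOL_MIN, mempool_alloc_slab,
 *					 mempool_free_slab, io_request_cache);
 *	if (!io_request_pool)
 *		return -ENOMEM;
 */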

/**
 * mempool_resize - resize an existing memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 *
 * Note that the caller must guarantee that no mempool_destroy() is
 * called while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		/* Shrink: free excess elements, dropping the lock around free() */
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	/* Fill up to the new minimum, dropping the lock around alloc() */
	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(gfp_mask, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
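
/*
 * Example (illustrative; "io_request_pool" and IO_POOL_MIN as in the
 * hypothetical example above): growing the reserve when a device needs
 * more guaranteed elements. Even on success the pool may reach the new
 * size only gradually, as mempool_free() calls refill it.
 *
 *	if (mempool_resize(io_request_pool, 2 * IO_POOL_MIN, GFP_KERNEL))
 *		printk(KERN_WARNING "unable to grow io_request_pool\n");
 */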

/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps. The
 * caller has to guarantee that all elements have been returned to the
 * pool (i.e. freed) prior to calling mempool_destroy().
 */
void mempool_destroy(mempool_t *pool)
{
	/* Check for outstanding elements */
	BUG_ON(pool->curr_nr != pool->min_nr);
	free_pool(pool);
}
EXPORT_SYMBOL(mempool_destroy);
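
/*
 * Example (illustrative; "io_request_pool" as above): typical teardown
 * from a module exit path. Every element must have been returned with
 * mempool_free() before this call, or the BUG_ON above will fire.
 *
 *	mempool_destroy(io_request_pool);
 */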

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 */
void *mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
{
	void *element;
	unsigned long flags;
	DEFINE_WAIT(wait);
	int gfp_temp;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	/* The first attempt should neither sleep nor do I/O. */
	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	/* Fall back to the preallocated reserve. */
	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	smp_mb();	/* pairs with the barrier in mempool_free() */
	if (!pool->curr_nr)
		io_schedule();
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
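
/*
 * Example (illustrative; "struct io_request" and "io_request_pool" are
 * hypothetical): allocating on the I/O path. With __GFP_WAIT set the
 * allocation cannot fail; at worst it blocks until another user returns
 * an element via mempool_free().
 *
 *	struct io_request *req;
 *
 *	req = mempool_alloc(io_request_pool, GFP_NOIO);
 *	...
 *	mempool_free(req, io_request_pool);
 */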

/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	smp_mb();	/* pairs with the barrier in mempool_alloc() */
	if (pool->curr_nr < pool->min_nr) {
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn pair: the pool_data is the
 * kmem_cache_t the elements come from.
 */
void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data)
{
	kmem_cache_t *mem = pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	kmem_cache_t *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
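
/*
 * Sketch of a custom alloc/free pair (illustrative only, not part of
 * this file): a pool of whole pages instead of slab objects, with the
 * pool created as mempool_create(nr, page_pool_alloc, page_pool_free,
 * NULL).
 *
 *	static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data)
 *	{
 *		return (void *)__get_free_page(gfp_mask);
 *	}
 *
 *	static void page_pool_free(void *element, void *data)
 *	{
 *		free_page((unsigned long)element);
 *	}
 */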