/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory,
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/genalloc.h>


/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 *
 * Returns a pointer to the new pool, or NULL if the pool structure could not
 * be allocated.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		rwlock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
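
/*
 * A minimal usage sketch, compiled out and never called from this file:
 * create a pool whose allocation granularity is 2^5 = 32 bytes.  The -1
 * node id lets the pool structure be allocated on any node.  The
 * example_pool name is a placeholder used only by these sketches.
 */
#if 0
static struct gen_pool *example_pool;

static int example_pool_create(void)
{
	example_pool = gen_pool_create(5, -1);	/* 32-byte granularity */
	if (example_pool == NULL)
		return -ENOMEM;
	return 0;
}
#endif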

/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success, or -1 if the chunk structure and bitmap could not
 * be allocated.
 */
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
		 int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
	if (unlikely(chunk == NULL))
		return -1;

	spin_lock_init(&chunk->lock);
	chunk->start_addr = addr;
	chunk->end_addr = addr + size;

	write_lock(&pool->lock);
	list_add(&chunk->next_chunk, &pool->chunks);
	write_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add);
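
/*
 * Usage sketch, compiled out: hand a chunk of special memory to the pool
 * from the sketch above.  The address and size arguments stand in for a
 * mapped on-device region; they are placeholders, not real kernel symbols.
 */
#if 0
static int example_pool_add_sram(unsigned long sram_virt, size_t sram_size)
{
	/* 0 on success, -1 if the chunk bookkeeping could not be allocated */
	return gen_pool_add(example_pool, sram_virt, sram_size, -1);
}
#endif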

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
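
/*
 * Usage sketch, compiled out: tear down the pool from the sketches above
 * once every allocation has been returned with gen_pool_free(); the
 * BUG_ON() in gen_pool_destroy() fires if any bitmap bit is still set.
 */
#if 0
static void example_pool_teardown(void)
{
	gen_pool_destroy(example_pool);
	example_pool = NULL;
}
#endif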

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 *
 * Returns the starting address of the allocation, or 0 if no chunk contains
 * a large enough run of free bits.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
	unsigned long addr, flags;
	int order = pool->min_alloc_order;
	int nbits, bit, start_bit, end_bit;

	if (size == 0)
		return 0;

	/* round the request up to a whole number of allocation units */
	nbits = (size + (1UL << order) - 1) >> order;

	read_lock(&pool->lock);
	list_for_each(_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		end_bit -= nbits + 1;

		spin_lock_irqsave(&chunk->lock, flags);
		bit = -1;
		while (bit + 1 < end_bit) {
			/* first fit: find the next run of nbits clear bits */
			bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
			if (bit >= end_bit)
				break;

			start_bit = bit;
			if (nbits > 1) {
				bit = find_next_bit(chunk->bits, bit + nbits,
						    bit + 1);
				if (bit - start_bit < nbits)
					continue;
			}

			/* mark the run allocated and return its address */
			addr = chunk->start_addr +
				((unsigned long)start_bit << order);
			while (nbits--)
				__set_bit(start_bit++, chunk->bits);
			spin_unlock_irqrestore(&chunk->lock, flags);
			read_unlock(&pool->lock);
			return addr;
		}
		spin_unlock_irqrestore(&chunk->lock, flags);
	}
	read_unlock(&pool->lock);
	return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);
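
/*
 * Usage sketch, compiled out: carve a 100-byte buffer out of the pool from
 * the sketches above.  With min_alloc_order = 5 the request is rounded up
 * to 4 bits (128 bytes); a return value of 0 means no chunk had a large
 * enough run of free bits.
 */
#if 0
static unsigned long example_pool_get_buffer(void)
{
	unsigned long vaddr;

	vaddr = gen_pool_alloc(example_pool, 100);
	if (vaddr == 0)
		printk(KERN_ERR "example: pool exhausted\n");
	return vaddr;
}
#endif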

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free; must match the size passed to
 *        gen_pool_alloc()
 *
 * Free previously allocated special memory back to the specified pool.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
	unsigned long flags;
	int order = pool->min_alloc_order;
	int bit, nbits;

	nbits = (size + (1UL << order) - 1) >> order;

	read_lock(&pool->lock);
	list_for_each(_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
			spin_lock_irqsave(&chunk->lock, flags);
			bit = (addr - chunk->start_addr) >> order;
			while (nbits--)
				__clear_bit(bit++, chunk->bits);
			spin_unlock_irqrestore(&chunk->lock, flags);
			break;
		}
	}
	BUG_ON(nbits > 0);
	read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
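
/*
 * Usage sketch, compiled out: return the buffer obtained in the sketch
 * above.  The same size that was passed to gen_pool_alloc() must be passed
 * back so that the same number of bitmap bits is cleared.
 */
#if 0
static void example_pool_put_buffer(unsigned long vaddr)
{
	gen_pool_free(example_pool, vaddr, 100);
}
#endif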