/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg
 * implementation, the allocator can NOT be used in NMI handlers. So
 * code that uses the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
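
/*
 * Typical usage, as an illustrative sketch only (sram_vaddr and
 * sram_paddr are assumed example values, not part of this file):
 * create a pool with a 32-byte minimum allocation (order 5), hand it
 * 64 KiB of on-device SRAM to manage, then allocate from it.
 *
 *	struct gen_pool *pool;
 *	unsigned long addr;
 *
 *	pool = gen_pool_create(5, -1);
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add_virt(pool, (unsigned long)sram_vaddr,
 *			      sram_paddr, SZ_64K, -1)) {
 *		gen_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *
 *	addr = gen_pool_alloc(pool, 256);
 *	if (addr)
 *		gen_pool_free(pool, addr, 256);
 */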

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one user will return the number of bits
 * still to be set, otherwise zero is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one user will return the number of bits
 * still to be cleared, otherwise zero is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

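/*
 * The caller-side contract for the two lockless helpers above, shown
 * as a sketch that mirrors what gen_pool_alloc() does later in this
 * file: a non-zero return from bitmap_set_ll() means another user
 * raced us and only the first (nr - remain) bits were set, so the
 * caller must undo the partial set and retry.
 *
 *	remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
 *	if (remain) {
 *		remain = bitmap_clear_ll(chunk->bits, start_bit,
 *					 nbits - remain);
 *		BUG_ON(remain);
 *		goto retry;
 *	}
 */
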
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
				pool->data);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

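/*
 * Illustrative use of gen_pool_dma_alloc(), a sketch only ("pool" and
 * "struct my_desc" are assumptions, not part of this file). The CPU
 * accesses the memory through the returned virtual address while the
 * device is programmed with the address returned in "dma":
 *
 *	dma_addr_t dma;
 *	void *va;
 *
 *	va = gen_pool_dma_alloc(pool, sizeof(struct my_desc), &dma);
 *	if (!va)
 *		return -ENOMEM;
 */
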
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in an NMI handler on architectures without
 * an NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of the generic memory pool. @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

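/*
 * Example of switching a pool to another allocator from this file, a
 * sketch only (the pool is assumed to exist already). gen_pool_best_fit
 * trades allocation speed for less fragmentation, while
 * gen_pool_first_fit_order_align aligns each allocation to the order
 * of its size:
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */
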
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
		int nid)
{
	struct gen_pool **ptr, *pool;

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = gen_pool_create(min_alloc_order, nid);
	if (pool) {
		*ptr = pool;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return pool;
}
EXPORT_SYMBOL(devm_gen_pool_create);

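/*
 * Sketch of managed pool creation from a driver probe routine (the
 * surrounding driver is assumed, not shown here). No explicit
 * gen_pool_destroy() is needed on the error or remove paths:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, ilog2(64), -1);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *	}
 */
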
/**
 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *dev_get_gen_pool(struct device *dev)
{
	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
					NULL);

	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(dev_get_gen_pool);

#ifdef CONFIG_OF
/**
 * of_get_named_gen_pool - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;
	pdev = of_find_device_by_node(np_pool);
	of_node_put(np_pool);
	if (!pdev)
		return NULL;
	return dev_get_gen_pool(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
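
/*
 * Illustrative device tree usage, a sketch only (the node and
 * property names are made up):
 *
 *	sram_pool: sram@4000 {
 *		compatible = "mmio-sram";
 *		reg = <0x4000 0x1000>;
 *	};
 *
 *	uart@8000 {
 *		rx-pool = <&sram_pool>;
 *	};
 *
 * A driver for the uart node could then look up its pool with:
 *
 *	pool = of_get_named_gen_pool(dev->of_node, "rx-pool", 0);
 */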
#endif /* CONFIG_OF */