/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

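/*
 * set_bits_ll()/clear_bits_ll() update a single bitmap word with cmpxchg,
 * retrying while concurrent writers race on *other* bits of the same word.
 * If one of the *requested* bits is already in the wrong state, they return
 * -EBUSY without retrying, so callers can detect a lost race and back out.
 */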
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race to set the same bit, the loser returns the number of bits
 * still to be set; on success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race to clear the same bit, the loser returns the number of bits
 * still to be cleared; on success 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

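/*
 * Example: a minimal sketch of typical pool setup for an already-mapped
 * on-device SRAM region. The names vaddr, paddr and SZ_256K here are
 * assumptions of the illustration, not part of this file.
 *
 *	struct gen_pool *pool;
 *
 *	pool = gen_pool_create(5, -1);		// 2^5 = 32-byte granularity
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add_virt(pool, vaddr, paddr, SZ_256K, -1)) {
 *		gen_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 */
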
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

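/*
 * Example: a sketch of translating a pool allocation into a physical
 * address for a device (pool and size are assumed to exist in the caller).
 *
 *	unsigned long vaddr = gen_pool_alloc(pool, size);
 *	phys_addr_t paddr = gen_pool_virt_to_phys(pool, vaddr);
 */
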
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_alloc_algo - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool,
 * using the given @algo function to find room for the allocation.
 * Can not be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo);

Dean Nelson | a58cbd7 | 2006-10-02 02:17:01 -0700 | [diff] [blame] | 336 | /** |
Nicolin Chen | 684f0d3 | 2013-11-12 15:09:52 -0800 | [diff] [blame] | 337 | * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage |
| 338 | * @pool: pool to allocate from |
| 339 | * @size: number of bytes to allocate from the pool |
Lad, Prabhakar | 0368dfd | 2014-01-29 14:05:37 -0800 | [diff] [blame] | 340 | * @dma: dma-view physical address return value. Use NULL if unneeded. |
Nicolin Chen | 684f0d3 | 2013-11-12 15:09:52 -0800 | [diff] [blame] | 341 | * |
| 342 | * Allocate the requested number of bytes from the specified pool. |
| 343 | * Uses the pool allocation function (with first-fit algorithm by default). |
| 344 | * Can not be used in NMI handler on architectures without |
| 345 | * NMI-safe cmpxchg implementation. |
| 346 | */ |
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

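/*
 * Example: a sketch of allocating a small buffer from a DMA-capable pool;
 * the buffer size and how @dma is handed to hardware are assumptions of
 * the illustration.
 *
 *	dma_addr_t dma;
 *	void *va = gen_pool_dma_alloc(pool, 256, &dma);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	// the CPU uses va; the device is programmed with dma
 */
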
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Can not be used in an NMI handler on architectures without
 * an NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

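/*
 * Example: a sketch of the usual allocate/use/free cycle (pool assumed to
 * have been set up as above).
 *
 *	unsigned long addr = gen_pool_alloc(pool, 128);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	// ... use the 128 bytes at addr ...
 *	gen_pool_free(pool, addr, 128);
 */
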
/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of the generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

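/*
 * Example: a sketch of a callback that sums the size of every chunk
 * (equivalent to gen_pool_size(), shown only to illustrate the iterator).
 *
 *	static void add_chunk_size(struct gen_pool *pool,
 *				   struct gen_pool_chunk *chunk, void *data)
 *	{
 *		*(size_t *)data += chunk->end_addr - chunk->start_addr + 1;
 *	}
 *
 *	size_t total = 0;
 *	gen_pool_for_each_chunk(pool, add_chunk_size, &total);
 */
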
/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

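/*
 * Example: a sketch of switching a pool to best-fit allocation.
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */
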
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start address of the chunk being searched
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);

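/*
 * Example: a sketch of requesting a 256-byte-aligned (power-of-two)
 * allocation with the aligned first-fit algorithm.
 *
 *	struct genpool_data_align align_data = { .align = 256 };
 *	unsigned long addr = gen_pool_alloc_algo(pool, size,
 *			gen_pool_first_fit_align, &align_data);
 */
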
/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed offset (struct genpool_data_fixed)
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);

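/*
 * Example: a sketch of reserving the region at a fixed offset (here
 * 0x1000 bytes into the chunk, which must be a multiple of the pool's
 * granule) with the fixed-allocation algorithm.
 *
 *	struct genpool_data_fixed fixed_data = { .offset = 0x1000 };
 *	unsigned long addr = gen_pool_alloc_algo(pool, size,
 *			gen_pool_fixed_alloc, &fixed_data);
 */
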
/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);

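/*
 * Example: a sketch of a probe() routine creating a named, managed pool
 * with 64-byte granularity; the "sram" label is an assumption of the
 * illustration.
 *
 *	pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *				    NUMA_NO_NODE, "sram");
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */
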
#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */