/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation works only if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
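
/*
 * Typical life cycle, as an illustrative sketch only ("region_virt" and
 * the sizes below are hypothetical, not requirements of the API).
 * gen_pool_create(8, ...) makes each bitmap bit represent 2^8 = 256
 * bytes, so allocations are rounded up to 256-byte multiples:
 *
 *      struct gen_pool *pool = gen_pool_create(8, -1);
 *      unsigned long vaddr;
 *
 *      if (!pool || gen_pool_add(pool, region_virt, SZ_64K, -1))
 *              goto fail;
 *      vaddr = gen_pool_alloc(pool, 512);
 *      if (vaddr) {
 *              ... use the 512 bytes at vaddr ...
 *              gen_pool_free(pool, vaddr, 512);
 *      }
 *      gen_pool_destroy(pool);
 */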

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>

/*
 * Atomically set the bits in @mask_to_set in the word at @addr,
 * retrying on concurrent modification. Fails with -EBUSY if any of
 * the requested bits is already set.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

        return 0;
}

/*
 * Atomically clear the bits in @mask_to_clear in the word at @addr,
 * retrying on concurrent modification. Fails with -EBUSY if any of
 * the requested bits is already clear.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

        return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting at @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one of the calls fails and returns the
 * number of bits that remain to be set; otherwise 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_set >= 0) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}
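
/*
 * Worked example (hypothetical values): on a 64-bit machine,
 * bitmap_set_ll(map, 62, 4) spans two words. The first pass sets bits
 * 62 and 63 (bits_to_set == 2), leaving nr == 2; the tail then ANDs
 * with BITMAP_LAST_WORD_MASK(66) to set bits 0 and 1 of the next word.
 * If that second word is contended, the function returns 2, telling
 * the caller that two bits remain unset and the first two must be
 * rolled back.
 */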

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting at @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one of the calls fails and returns the
 * number of bits that remain to be cleared; otherwise 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_clear >= 0) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}
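
/*
 * The clear side mirrors the sketch above: bitmap_clear_ll() fails on
 * any word where a bit in the range is already clear and returns how
 * many bits were left untouched. The allocator below only clears
 * ranges it knows are fully set, so it treats a nonzero return as a
 * bug.
 */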

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);
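
/*
 * A quick granularity example (hypothetical values): with a pool made
 * by gen_pool_create(8, -1), gen_pool_alloc(pool, 100) consumes one
 * 256-byte granule and gen_pool_alloc(pool, 300) consumes two; pass
 * order 0 for a byte-granular pool.
 */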

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

        chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size;
        atomic_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
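
/*
 * Illustrative sketch of adding a chunk whose physical address matters,
 * e.g. coherent DMA memory on a platform where the DMA address is the
 * physical address ("dev" and the 64KiB size are hypothetical):
 *
 *      dma_addr_t dma;
 *      void *va = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
 *
 *      if (!va || gen_pool_add_virt(pool, (unsigned long)va,
 *                                   (phys_addr_t)dma, SZ_64K, -1))
 *              goto fail;
 */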

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
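
/*
 * Usage sketch (hypothetical device registers, and assuming the
 * physical address fits in 32 bits): a driver handing its hardware the
 * physical address of a buffer carved out of the pool:
 *
 *      vaddr = gen_pool_alloc(pool, len);
 *      writel(gen_pool_virt_to_phys(pool, vaddr), dev_regs + BUF_ADDR);
 */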

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_read(&chunk->avail))
                        continue;

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
                start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
                                                       start_bit, nbits, 0);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_sub(size, &chunk->avail);
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
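
/*
 * Note that failure is reported as address 0, so callers check the
 * result before use (a sketch; the 512-byte size is arbitrary):
 *
 *      unsigned long vaddr = gen_pool_alloc(pool, 512);
 *
 *      if (!vaddr)
 *              return -ENOMEM;
 */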

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in an NMI handler on architectures without
 * an NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free);
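
/*
 * The caller must remember the size of each allocation: freeing with a
 * size other than the one requested from gen_pool_alloc() (rounded up
 * to the pool's granularity) either leaks granules or trips the
 * BUG_ON() above.
 */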

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
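
/*
 * Example callback (an illustrative sketch; "count_chunk" is a made-up
 * helper, not kernel API):
 *
 *      static void count_chunk(struct gen_pool *pool,
 *                              struct gen_pool_chunk *chunk, void *data)
 *      {
 *              (*(int *)data)++;
 *      }
 *
 *      int nchunks = 0;
 *      gen_pool_for_each_chunk(pool, count_chunk, &nchunks);
 */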

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk->end_addr - chunk->start_addr;
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
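
/*
 * The two accessors above combine naturally for occupancy reporting,
 * e.g. (a sketch, with a hypothetical debug consumer):
 *
 *      pr_debug("pool: %zu of %zu bytes free\n",
 *               gen_pool_avail(pool), gen_pool_size(pool));
 */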