#ifndef __CMA_H__
#define __CMA_H__

#include <linux/init.h>
#include <linux/types.h>

/*
 * There is always at least the global CMA area and a few optional
 * areas configured in the kernel .config.
 */
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
#else
#define MAX_CMA_AREAS (0)
#endif

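/*
 * Opaque descriptor of one contiguous memory area; the definition is
 * private to the CMA implementation.
 */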
struct cma;

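/* Total number of pages reserved for CMA across all areas. */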
extern unsigned long totalcma_pages;
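
/* Return an area's physical base address and its size in bytes. */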
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);

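/*
 * Reserve a contiguous area for CMA at early boot, while memblock is
 * still active. @base, @size, @limit and @alignment are in bytes;
 * @base, @limit and @alignment may be 0 to let the kernel choose. If
 * @fixed is true, the area must be placed exactly at @base. On success
 * a handle to the new area is returned through @res_cma.
 */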
extern int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma);
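
/*
 * Create a CMA area from a range that was already reserved by other
 * means (e.g. a reserved-memory region from the device tree).
 */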
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
					unsigned int order_per_bit,
					struct cma **res_cma);
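
/*
 * Allocate @count contiguous pages from @cma, aligned to at least
 * 2^@align pages. Returns the first page on success, NULL on failure.
 */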
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
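
/*
 * Release @count pages previously obtained from cma_alloc(). Returns
 * true on success, false if the pages do not belong to @cma.
 */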
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
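
/*
 * Typical usage, as a minimal sketch (the handle name and sizes below
 * are illustrative, not part of this API): the platform reserves an
 * area at early boot, then drivers allocate and release pages from it
 * at runtime.
 *
 *	static struct cma *example_cma;	// hypothetical handle
 *
 *	// Early boot, while memblock is still active:
 *	cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false, &example_cma);
 *
 *	// Runtime, e.g. from a driver:
 *	struct page *pages = cma_alloc(example_cma, 16, 4);
 *	if (pages)
 *		cma_release(example_cma, pages, 16);
 */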
#endif