#ifndef __CMA_H__
#define __CMA_H__

/*
 * There is always at least the global CMA area and a few optional
 * areas configured in the kernel .config.
 */
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)

#else
#define MAX_CMA_AREAS (0)

#endif

struct cma;

/* Physical base address and size (in bytes) of a CMA area. */
extern phys_addr_t cma_get_base(struct cma *cma);
extern unsigned long cma_get_size(struct cma *cma);

/* Reserve a contiguous memory area during early boot. */
extern int __init cma_declare_contiguous(phys_addr_t size,
			phys_addr_t base, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma);
/* Create a CMA area from memory that has already been reserved. */
extern int cma_init_reserved_mem(phys_addr_t size,
			phys_addr_t base, int order_per_bit,
			struct cma **res_cma);
/* Allocate and release contiguous pages from a CMA area. */
extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
extern bool cma_release(struct cma *cma, struct page *pages, int count);
#endif
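
/*
 * Minimal usage sketch (illustrative only, not part of this header;
 * the variable name, the 16 MiB size and the zero base/limit/alignment
 * values are assumptions chosen for the example):
 *
 *	static struct cma *my_cma;
 *
 *	At early boot, before the page allocator is up, reserve an area:
 *
 *		err = cma_declare_contiguous(SZ_16M, 0, 0, 0, 0, false,
 *					     &my_cma);
 *
 *	At runtime, allocate four contiguous pages and release them again:
 *
 *		struct page *pages = cma_alloc(my_cma, 4, 0);
 *		if (pages)
 *			cma_release(my_cma, pages, 4);
 */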