blob: 0430ed05d3b9f0511975fbef5ca89ec14faf3e92 [file] [log] [blame]
#ifndef __CMA_H__
#define __CMA_H__

/*
 * There is always at least the global CMA area and a few optional
 * areas configured in kernel .config.
 */
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)

#else
#define MAX_CMA_AREAS (0)

#endif

/* Opaque descriptor of one contiguous memory area; defined out of view
 * (presumably in mm/cma.c — confirm). Callers hold only pointers to it. */
struct cma;

/* Accessors for an area's physical base address and its size
 * (size presumably in bytes — confirm against the definition). */
extern phys_addr_t cma_get_base(struct cma *cma);
extern unsigned long cma_get_size(struct cma *cma);

/*
 * Reserve a contiguous memory area and return its descriptor via
 * @res_cma.  Marked __init: callable only during kernel initialization.
 * NOTE(review): presumably @fixed requires the reservation to sit exactly
 * at @base, and @limit bounds the highest acceptable address — confirm
 * against the out-of-view definition.
 */
extern int __init cma_declare_contiguous(phys_addr_t size,
			phys_addr_t base, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma);
/*
 * Create a CMA area from an already-reserved memory range.
 * NOTE(review): order_per_bit is plain int here but unsigned int in
 * cma_declare_contiguous() above — worth unifying, in step with the
 * definition, to avoid sign-conversion surprises.
 */
extern int cma_init_reserved_mem(phys_addr_t size,
					phys_addr_t base, int order_per_bit,
					struct cma **res_cma);
/* Allocate @count pages from @cma at the given alignment, and release
 * them back.  Presumably cma_alloc() returns NULL on failure and
 * cma_release() returns false when @pages is not part of @cma — TODO
 * confirm against the definitions. */
extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
extern bool cma_release(struct cma *cma, struct page *pages, int count);
#endif