#ifndef __LINUX_CMA_H
#define __LINUX_CMA_H

/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

/*
 * Contiguous Memory Allocator
 *
 *   The Contiguous Memory Allocator (CMA) makes it possible to
 *   allocate big contiguous chunks of memory after the system has
 *   booted.
 *
 * Why is it needed?
 *
 *   Various devices on embedded systems have no scatter-gather and/or
 *   IO map support and require contiguous blocks of memory to
 *   operate.  They include devices such as cameras, hardware video
 *   coders, etc.
 *
 *   Such devices often require big memory buffers (a full HD frame
 *   is, for instance, more than 2 megapixels, i.e. more than 6 MB
 *   of memory), which makes mechanisms such as kmalloc() or
 *   alloc_page() ineffective.
 *
 *   At the same time, a solution where a big memory region is
 *   reserved for a device is suboptimal, since often more memory is
 *   reserved than strictly required and, moreover, the memory is
 *   inaccessible to the page allocator even when the device driver
 *   does not use it.
 *
 *   CMA tries to solve this issue by operating on memory regions
 *   from which only movable pages can be allocated.  This way, the
 *   kernel can use the memory for pagecache and, when a device
 *   driver requests it, the allocated pages can be migrated away.
 *
 * Driver usage
 *
 *   CMA should not be used by device drivers directly.  It is only
 *   a helper framework for the dma-mapping subsystem.
 *
 *   For more information, see the kernel-doc comments in
 *   drivers/base/dma-contiguous.c.
 */

#ifdef __KERNEL__

struct cma;
struct page;
struct device;

#ifdef CONFIG_DMA_CMA

/*
 * There is always at least the global CMA area, plus a number of
 * optional device-private areas configured in the kernel .config.
 */
#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)

extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
{
	if (dev)
		dev->cma_area = cma;
}

static inline void dma_contiguous_set_default(struct cma *cma)
{
	dma_contiguous_default_area = cma;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);

int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma);

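/*
 * A minimal sketch of how this reservation hook is typically used:
 * architecture setup code reserves the default CMA area while memblock
 * is still active.  The function name and the 512 MiB limit below are
 * hypothetical placeholders, not part of this API:
 *
 *	void __init my_arch_reserve_memory(void)
 *	{
 *		// reserve the global CMA area below an assumed
 *		// DMA-able boundary
 *		dma_contiguous_reserve(SZ_512M);
 *	}
 */
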
/**
 * dma_declare_contiguous() - reserve area for contiguous memory handling
 *			      for particular device
 * @dev:   Pointer to device structure.
 * @size:  Size of the reserved memory.
 * @base:  Start address of the reserved memory (optional, 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory for the specified device.  It should be
 * called by board-specific code while the early allocator (memblock or
 * bootmem) is still active.
 */
static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
					 phys_addr_t base, phys_addr_t limit)
{
	struct cma *cma;
	int ret;

	ret = dma_contiguous_reserve_area(size, base, limit, &cma);
	if (ret == 0)
		dev_set_cma_area(dev, cma);

	return ret;
}
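
/*
 * A minimal sketch of a caller: board code might reserve a 16 MiB
 * device-private area during early init.  "my_camera_dev" is a
 * hypothetical device, and the error handling is illustrative only:
 *
 *	err = dma_declare_contiguous(&my_camera_dev, SZ_16M, 0, 0);
 *	if (err)
 *		pr_warn("CMA: camera area reservation failed: %d\n", err);
 */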

struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int order);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count);

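/*
 * A minimal sketch of how the dma-mapping subsystem pairs these calls;
 * "count" is a number of pages and "order" the requested alignment.
 * The variable names are placeholders:
 *
 *	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(size));
 *	// ... use the contiguous range, then later ...
 *	if (!dma_release_from_contiguous(dev, page, count))
 *		pr_warn("pages were not part of a CMA area\n");
 */
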
#else

#define MAX_CMA_AREAS	(0)

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}

static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }

static inline void dma_contiguous_set_default(struct cma *cma) { }

static inline void dma_contiguous_reserve(phys_addr_t limit) { }

static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
					      phys_addr_t limit, struct cma **res_cma)
{
	return -ENOSYS;
}

static inline
int dma_declare_contiguous(struct device *dev, phys_addr_t size,
			   phys_addr_t base, phys_addr_t limit)
{
	return -ENOSYS;
}

static inline
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int order)
{
	return NULL;
}

static inline
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return false;
}

#endif /* CONFIG_DMA_CMA */

#endif /* __KERNEL__ */

#endif /* __LINUX_CMA_H */