blob: cc9c46c31514822ba43fedbea9e6058e515964d6 [file] [log] [blame]
Marek Szyprowski55bb0332011-12-29 13:09:51 +01001#ifndef __LINUX_CMA_H
2#define __LINUX_CMA_H
3
4/*
5 * Contiguous Memory Allocator for DMA mapping framework
6 * Copyright (c) 2010-2011 by Samsung Electronics.
7 * Written by:
8 * Marek Szyprowski <m.szyprowski@samsung.com>
9 * Michal Nazarewicz <mina86@mina86.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
15 */
16
17/*
18 * Contiguous Memory Allocator
19 *
20 * The Contiguous Memory Allocator (CMA) makes it possible to
21 * allocate big contiguous chunks of memory after the system has
22 * booted.
23 *
24 * Why is it needed?
25 *
 * Various devices on embedded systems have no scatter-gather and/or
27 * IO map support and require contiguous blocks of memory to
28 * operate. They include devices such as cameras, hardware video
29 * coders, etc.
30 *
31 * Such devices often require big memory buffers (a full HD frame
 * is, for instance, more than 2 megapixels large, i.e. more than 6
33 * MB of memory), which makes mechanisms such as kmalloc() or
34 * alloc_page() ineffective.
35 *
36 * At the same time, a solution where a big memory region is
37 * reserved for a device is suboptimal since often more memory is
 * reserved than strictly required and, moreover, the memory is
39 * inaccessible to page system even if device drivers don't use it.
40 *
41 * CMA tries to solve this issue by operating on memory regions
42 * where only movable pages can be allocated from. This way, kernel
43 * can use the memory for pagecache and when device driver requests
44 * it, allocated pages can be migrated.
45 *
46 * Driver usage
47 *
48 * CMA should not be used by the device drivers directly. It is
49 * only a helper framework for dma-mapping subsystem.
50 *
51 * For more information, see kernel-docs in drivers/base/dma-contiguous.c
52 */
53
#ifdef __KERNEL__

/* Opaque forward declarations; the full definitions live elsewhere. */
struct cma;
struct page;
struct device;
59
#ifdef CONFIG_CMA

/*
 * There is always at least global CMA area and a few optional device
 * private areas configured in kernel .config.
 */
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)

/* Base physical address of the CMA area assigned to @dev. */
phys_addr_t cma_get_base(struct device *dev);

/* Default CMA area, used by devices without a private area. */
extern struct cma *dma_contiguous_def_area;

/* Reserve the default CMA area at early boot, below @addr_limit. */
void dma_contiguous_reserve(phys_addr_t addr_limit);

/*
 * Reserve a CMA area of @size bytes below @limit; the chosen base is
 * returned through @res_base.  NOTE(review): @in_system presumably
 * controls whether the area remains part of the system memory map —
 * confirm against the definition in drivers/base/dma-contiguous.c.
 */
int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
				phys_addr_t limit, const char *name,
				bool in_system);

/* Associate the CMA area reserved at @base with @dev. */
int dma_contiguous_add_device(struct device *dev, phys_addr_t base);
80
81/**
82 * dma_declare_contiguous() - reserve area for contiguous memory handling
83 * for particular device
84 * @dev: Pointer to device structure.
85 * @size: Size of the reserved memory.
86 * @base: Start address of the reserved memory (optional, 0 for any).
87 * @limit: End address of the reserved memory (optional, 0 for any).
88 *
89 * This function reserves memory for specified device. It should be
90 * called by board specific code when early allocator (memblock or bootmem)
91 * is still activate.
92 */
93
94static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
95 phys_addr_t base, phys_addr_t limit)
96{
97 int ret;
Laura Abbott6bc7adb2013-04-26 15:51:06 -070098 ret = dma_contiguous_reserve_area(size, &base, limit, NULL, true);
99 if (ret == 0)
100 ret = dma_contiguous_add_device(dev, base);
101 return ret;
102}
103
104static inline int dma_declare_contiguous_reserved(struct device *dev,
105 phys_addr_t size,
106 phys_addr_t base,
107 phys_addr_t limit)
108{
109 int ret;
110 ret = dma_contiguous_reserve_area(size, &base, limit, NULL, false);
Marek Szyprowskidb7909c2013-02-14 13:45:27 +0100111 if (ret == 0)
112 ret = dma_contiguous_add_device(dev, base);
113 return ret;
114}
Marek Szyprowski55bb0332011-12-29 13:09:51 +0100115
/*
 * Allocate @count pages from @dev's CMA area.  NOTE(review): @order
 * presumably specifies alignment as log2 of pages — confirm against the
 * definition.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int order);

/*
 * Release @count pages previously obtained from dma_alloc_from_contiguous();
 * returns false when @pages did not come from @dev's CMA area.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count);
120
#else

/* CMA disabled: no areas exist. */
#define MAX_CMA_AREAS (0)

/* CMA disabled: reserving the default area at boot is a no-op. */
static inline void dma_contiguous_reserve(phys_addr_t limit) { }
126
127static inline
Vitaly Andrianov2ee01742012-12-05 09:29:25 -0500128int dma_declare_contiguous(struct device *dev, phys_addr_t size,
Marek Szyprowski55bb0332011-12-29 13:09:51 +0100129 phys_addr_t base, phys_addr_t limit)
130{
131 return -ENOSYS;
132}
133
/* CMA disabled: contiguous allocation always fails. */
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
						     size_t count,
						     unsigned int order)
{
	return NULL;
}
140
/* CMA disabled: there is nothing to release; always report failure. */
static inline bool dma_release_from_contiguous(struct device *dev,
					       struct page *pages,
					       int count)
{
	return false;
}
147
Laura Abbott2dcd9e62013-06-03 19:14:08 -0700148
149static inline phys_addr_t cma_get_base(struct device *dev)
150{
151 return 0;
152}
153
#endif /* CONFIG_CMA */

#endif /* __KERNEL__ */

#endif /* __LINUX_CMA_H */