/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */
13
14#define pr_fmt(fmt) "cma: " fmt
15
16#ifdef CONFIG_CMA_DEBUG
17#ifndef DEBUG
18# define DEBUG
19#endif
20#endif
21
22#include <asm/page.h>
23#include <asm/dma-contiguous.h>
24
25#include <linux/memblock.h>
26#include <linux/err.h>
Laurent Pinchart446c82f2012-10-18 09:29:44 +020027#include <linux/sizes.h>
Marek Szyprowskic64be2b2011-12-29 13:09:51 +010028#include <linux/dma-contiguous.h>
Joonsoo Kima2541292014-08-06 16:05:25 -070029#include <linux/cma.h>
Marek Szyprowskic64be2b2011-12-29 13:09:51 +010030
31#ifdef CONFIG_CMA_SIZE_MBYTES
32#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
33#else
34#define CMA_SIZE_MBYTES 0
35#endif
36
Joonsoo Kima2541292014-08-06 16:05:25 -070037struct cma *dma_contiguous_default_area;
38
/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
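
/*
 * Accepted command-line syntax, as parsed above (the concrete values are
 * illustrative only): cma=<size>[@<base>[-<limit>]]
 *
 *	cma=64M				- 64 MiB area placed anywhere
 *	cma=64M@0x20000000		- 64 MiB fixed at 0x20000000
 *	cma=64M@0x20000000-0x30000000	- 64 MiB placed within that range
 *
 * When a base is given without a limit, the limit defaults to base + size,
 * which dma_contiguous_reserve() below treats as a fixed placement.
 */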

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
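
/*
 * Worked example for the percentage selection above (numbers are
 * illustrative): with 512 MiB of memory, i.e. 131072 pages of 4 KiB, and
 * CONFIG_CMA_SIZE_PERCENTAGE=16, cma_early_percent_memory() returns
 * (131072 * 16 / 100) << PAGE_SHIFT, i.e. 20971 pages or roughly 82 MiB.
 */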

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
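
/*
 * Minimal usage sketch (hypothetical code, not part of this file): an
 * architecture typically calls dma_contiguous_reserve() from its early
 * memory setup, after memblock is ready and other early reservations are
 * done, passing the highest address its DMA masters can access:
 *
 *	void __init foo_arch_reserve(void)	// made-up arch hook name
 *	{
 *		phys_addr_t dma_limit = SZ_1G;	// assumed DMA-reachable limit
 *
 *		dma_contiguous_reserve(dma_limit);
 *	}
 */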

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}
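
/*
 * Minimal usage sketch (hypothetical code, not part of this file): board
 * or arch setup code can carve out a private CMA area and later attach it
 * to one device with dev_set_cma_area(), instead of sharing the default
 * global area:
 *
 *	static struct cma *foo_cma;		// made-up name
 *
 *	void __init foo_reserve_cma(void)
 *	{
 *		// 16 MiB, any base, no upper limit, not fixed
 *		dma_contiguous_reserve_area(SZ_16M, 0, 0, &foo_cma, false);
 *	}
 *
 * Once the device exists, dev_set_cma_area(dev, foo_cma) directs its CMA
 * allocations to this area.
 */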

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}
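
/*
 * Minimal usage sketch (hypothetical code, not part of this file): these
 * helpers are normally called from the architecture's DMA mapping ops
 * rather than from drivers; a CMA-backed allocation with a page-allocator
 * fallback looks roughly like this:
 *
 *	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(size));
 *	if (!page)
 *		page = alloc_pages(GFP_KERNEL, get_order(size));
 *	...
 *	if (!dma_release_from_contiguous(dev, page, count))
 *		__free_pages(page, get_order(size));
 */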

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev_set_cma_area(dev, rmem->priv);
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev_set_cma_area(dev, NULL);
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	phys_addr_t mask = align - 1;
	unsigned long node = rmem->fdt_node;
	struct cma *cma;
	int err;

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if ((rmem->base & mask) || (rmem->size & mask)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
		dma_contiguous_set_default(cma);

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
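
/*
 * Illustrative device-tree node (made-up node name and size) that this
 * handler would match: a child of /reserved-memory with
 * compatible = "shared-dma-pool" and the "reusable" property becomes a
 * CMA pool, and "linux,cma-default" makes it the default area:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 */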
#endif