/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2 license
 * terms, and distributes only under these terms.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <trace/events/kmem.h>

#ifndef SZ_1M
#define SZ_1M (1 << 20)
#endif

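/*
 * One contiguous memory area. Allocations within the area are tracked in
 * @bitmap with one bit per page, starting at @base_pfn.
 */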
struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
};

static DEFINE_MUTEX(cma_mutex);

struct cma *dma_contiguous_def_area;
phys_addr_t dma_contiguous_def_base;

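/*
 * Areas reserved at early boot (memblock time). Each entry is turned into
 * a live struct cma by cma_init_reserved_areas() once the slab and page
 * allocators are available.
 */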
static struct cma_area {
	phys_addr_t base;
	unsigned long size;
	struct cma *cma;
	const char *name;
} cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

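/*
 * Early device <-> area assignments recorded by dma_contiguous_add_device().
 * They are applied in cma_init_reserved_areas(), after the areas have been
 * created.
 */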
static struct cma_map {
	phys_addr_t base;
	struct device *dev;
} cma_maps[MAX_CMA_AREAS] __initdata;
static unsigned cma_map_count __initdata;

static struct cma *cma_get_area(phys_addr_t base)
{
	int i;
	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].base == base)
			return cma_areas[i].cma;
	return NULL;
}

static struct cma *cma_get_area_by_name(const char *name)
{
	int i;
	if (!name)
		return NULL;

	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].name && strcmp(cma_areas[i].name, name) == 0)
			return cma_areas[i].cma;
	return NULL;
}

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their
 * system should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	return 0;
}
early_param("cma", early_cma);

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

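/*
 * Hand a reserved range back to the page allocator one pageblock at a time,
 * marking each pageblock MIGRATE_CMA via init_cma_reserved_pageblock().
 * All pages must be valid and belong to a single zone.
 */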
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
	return 0;
}

static __init struct cma *cma_create_area(unsigned long base_pfn,
					  unsigned long count)
{
	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct cma *cma;
	int ret = -ENOMEM;

	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

	cma = kmalloc(sizeof *cma, GFP_KERNEL);
	if (!cma)
		return ERR_PTR(-ENOMEM);

	cma->base_pfn = base_pfn;
	cma->count = count;
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		goto no_mem;

	ret = cma_activate_area(base_pfn, count);
	if (ret)
		goto error;

	pr_debug("%s: returned %p\n", __func__, (void *)cma);
	return cma;

error:
	kfree(cma->bitmap);
no_mem:
	kfree(cma);
	return ERR_PTR(ret);
}

/*****************************************************************************/

#ifdef CONFIG_OF
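/*
 * cma_fdt_scan() picks up flat device-tree nodes carrying the
 * "linux,contiguous-region" property. An illustrative node (names and
 * addresses are examples only) would look like:
 *
 *	multimedia_region: region@77000000 {
 *		linux,contiguous-region;
 *		reg = <0x77000000 0x4000000>;
 *		label = "multimedia";
 *	};
 */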
int __init cma_fdt_scan(unsigned long node, const char *uname,
			int depth, void *data)
{
	phys_addr_t base, size;
	unsigned long len;
	__be32 *prop;
	char *name;

	if (!of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))
		return 0;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop || (len != 2 * sizeof(unsigned long)))
		return 0;

	base = be32_to_cpu(prop[0]);
	size = be32_to_cpu(prop[1]);

	name = of_get_flat_dt_prop(node, "label", NULL);

	pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
		(unsigned long)base, (unsigned long)size / SZ_1M);
	dma_contiguous_reserve_area(size, &base, 0, name);

	return 0;
}
#endif

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It reserves contiguous areas for global, device-independent
 * allocations and (optionally) all areas defined in the device tree.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t sel_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		sel_size = size_cmdline;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		sel_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		sel_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		sel_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		sel_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (sel_size) {
		phys_addr_t base = 0;
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)sel_size / SZ_1M);

		if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL)
		    == 0)
			dma_contiguous_def_base = base;
	}
#ifdef CONFIG_OF
	of_scan_flat_dt(cma_fdt_scan, NULL);
#endif
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Pointer to the base address of the reserved area; also used to
 *	  return the base address of the area actually reserved. Optional,
 *	  use a pointer to 0 for any placement.
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @name: Name of the area (optional); matched against the "label" property
 *	  of device-tree contiguous regions.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas for specific
 * devices.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
				       phys_addr_t limit, const char *name)
{
	phys_addr_t base = *res_base;
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
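	/*
	 * Alignment covers both a pageblock and a maximum-order buddy
	 * block, so the whole range can later be handed back to the page
	 * allocator pageblock by pageblock in cma_activate_area().
	 */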
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma_areas[cma_area_count].base = base;
	cma_areas[cma_area_count].size = size;
	cma_areas[cma_area_count].name = name;
	cma_area_count++;
	*res_base = base;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

/**
 * dma_contiguous_add_device() - add device to custom contiguous reserved area
 * @dev: Pointer to device structure.
 * @base: Base address of the reserved area, as returned by
 *	  dma_contiguous_reserve_area().
 *
 * This function assigns the given device to the contiguous memory area
 * reserved earlier by dma_contiguous_reserve_area().
 */
int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base)
{
	if (cma_map_count == ARRAY_SIZE(cma_maps)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}
	cma_maps[cma_map_count].dev = dev;
	cma_maps[cma_map_count].base = base;
	cma_map_count++;
	return 0;
}

#ifdef CONFIG_OF
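/*
 * A device opts in to a named region with a "linux,contiguous-region"
 * phandle, e.g. (illustrative only):
 *
 *	video-codec@fd000000 {
 *		...
 *		linux,contiguous-region = <&multimedia_region>;
 *	};
 */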
static void cma_assign_device_from_dt(struct device *dev)
{
	struct device_node *node;
	struct cma *cma;
	const char *name;
	u32 value;

	node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
	if (!node)
		return;
	if (of_property_read_u32(node, "reg", &value) && !value)
		return;

	if (of_property_read_string(node, "label", &name))
		return;

	cma = cma_get_area_by_name(name);
	if (!cma)
		return;

	dev_set_cma_area(dev, cma);
	pr_info("Assigned CMA region at %lx to %s device\n",
		(unsigned long)value, dev_name(dev));
}

static int cma_device_init_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct device *dev = data;
	if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
		cma_assign_device_from_dt(dev);
	return NOTIFY_DONE;
}

static struct notifier_block cma_dev_init_nb = {
	.notifier_call = cma_device_init_notifier_call,
};
#endif

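/*
 * Activate every area reserved at early boot, bind the default area and any
 * assignments recorded by dma_contiguous_add_device(), and register a
 * platform-bus notifier so devices probed from the device tree get their
 * region assigned as they appear.
 */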
static int __init cma_init_reserved_areas(void)
{
	struct cma *cma;
	int i;

	for (i = 0; i < cma_area_count; i++) {
		phys_addr_t base = PFN_DOWN(cma_areas[i].base);
		unsigned int count = cma_areas[i].size >> PAGE_SHIFT;

		cma = cma_create_area(base, count);
		if (!IS_ERR(cma))
			cma_areas[i].cma = cma;
	}

	dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);

	for (i = 0; i < cma_map_count; i++) {
		cma = cma_get_area(cma_maps[i].base);
		dev_set_cma_area(cma_maps[i].dev, cma);
	}

#ifdef CONFIG_OF
	bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
#endif
	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;
	int tries = 0;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

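	/*
	 * Scan the bitmap for a free, suitably aligned range and try to
	 * migrate it out with alloc_contig_range(). On -EBUSY the range is
	 * temporarily pinned, so move past it and retry; any other error
	 * aborts the allocation.
	 */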
	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		tries++;
		trace_dma_alloc_contiguous_retry(tries);

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	mutex_lock(&cma_mutex);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	free_contig_range(pfn, count);
	mutex_unlock(&cma_mutex);

	return true;
}