/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2 license
 * terms, and distributes only under these terms.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <trace/events/kmem.h>

#ifndef SZ_1M
#define SZ_1M (1 << 20)
#endif

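/*
 * A struct cma describes one contiguous memory area: its placement as a
 * page frame range (base_pfn, count pages) and a bitmap with one bit per
 * page that records which pages are currently handed out by
 * dma_alloc_from_contiguous().
 */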
struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
};

static DEFINE_MUTEX(cma_mutex);

struct cma *dma_contiguous_def_area;
phys_addr_t dma_contiguous_def_base;

static struct cma_area {
	phys_addr_t base;
	unsigned long size;
	struct cma *cma;
	const char *name;
} cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static struct cma_map {
	phys_addr_t base;
	struct device *dev;
} cma_maps[MAX_CMA_AREAS] __initdata;
static unsigned cma_map_count __initdata;

static struct cma *cma_get_area(phys_addr_t base)
{
	int i;
	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].base == base)
			return cma_areas[i].cma;
	return NULL;
}

static struct cma *cma_get_area_by_name(const char *name)
{
	int i;
	if (!name)
		return NULL;

	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].name && strcmp(cma_areas[i].name, name) == 0)
			return cma_areas[i].cma;
	return NULL;
}

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	return 0;
}
early_param("cma", early_cma);
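
/*
 * Example (illustrative value only): booting with "cma=64M" on the kernel
 * command line reserves a 64 MiB global area; memparse() also accepts the
 * usual K/M/G suffixes.
 */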

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

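/*
 * cma_activate_area() walks a reserved range one pageblock at a time,
 * checks that every page is valid and that the whole range sits in a
 * single zone, and then hands each pageblock to
 * init_cma_reserved_pageblock(), which marks it MIGRATE_CMA and releases
 * its pages to the page allocator.
 */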
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
	return 0;
}

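/*
 * cma_create_area() builds the struct cma for one reserved range: it
 * allocates the descriptor and its allocation bitmap and then activates
 * the underlying pageblocks. It runs from cma_init_reserved_areas(),
 * i.e. once the slab allocator is available.
 */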
static __init struct cma *cma_create_area(unsigned long base_pfn,
				     unsigned long count)
{
	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct cma *cma;
	int ret = -ENOMEM;

	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

	cma = kmalloc(sizeof *cma, GFP_KERNEL);
	if (!cma)
		return ERR_PTR(-ENOMEM);

	cma->base_pfn = base_pfn;
	cma->count = count;
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		goto no_mem;

	ret = cma_activate_area(base_pfn, count);
	if (ret)
		goto error;

	pr_debug("%s: returned %p\n", __func__, (void *)cma);
	return cma;

error:
	kfree(cma->bitmap);
no_mem:
	kfree(cma);
	return ERR_PTR(ret);
}

/*****************************************************************************/

#ifdef CONFIG_OF
int __init cma_fdt_scan(unsigned long node, const char *uname,
				int depth, void *data)
{
	phys_addr_t base, size;
	unsigned long len;
	__be32 *prop;
	char *name;

	if (strncmp(uname, "region@", 7) != 0 || depth != 2 ||
	    !of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))
		return 0;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop || (len != 2 * sizeof(unsigned long)))
		return 0;

	base = be32_to_cpu(prop[0]);
	size = be32_to_cpu(prop[1]);

	name = of_get_flat_dt_prop(node, "label", NULL);

	pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
		(unsigned long)base, (unsigned long)size / SZ_1M);
	dma_contiguous_reserve_area(size, &base, 0, name);

	return 0;
}
#endif
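
/*
 * cma_fdt_scan() matches flat-device-tree nodes of roughly this shape,
 * where "reg" holds <base size> (a sketch only; the unit address, size
 * and label below are made up):
 *
 *	region@40000000 {
 *		linux,contiguous-region;
 *		reg = <0x40000000 0x4000000>;
 *		label = "example-region";
 *	};
 *
 * Client devices later refer to such a region through a
 * "linux,contiguous-region" phandle, which cma_assign_device_from_dt()
 * resolves when the device is added.
 */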

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It reserves contiguous areas for global, device-independent
 * allocations and (optionally) all areas defined in the device tree.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t sel_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		sel_size = size_cmdline;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		sel_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		sel_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		sel_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		sel_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (sel_size) {
		phys_addr_t base = 0;
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)sel_size / SZ_1M);

		if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL)
		    == 0)
			dma_contiguous_def_base = base;
	}
#ifdef CONFIG_OF
	of_scan_flat_dt(cma_fdt_scan, NULL);
#endif
}
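
/*
 * A typical caller (sketch only; "arch_memblock_init" and "arch_dma_limit"
 * are placeholder names, the limit being whatever the architecture uses as
 * its DMA addressing limit, or 0 for none):
 *
 *	void __init arch_memblock_init(void)
 *	{
 *		...
 *		dma_contiguous_reserve(arch_dma_limit);
 *	}
 *
 * On ARM, for instance, this call is made from the memblock setup path in
 * arch/arm/mm/init.c.
 */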

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Pointer to the base address of the reserved area, also used to return
 *	  the base address of the actually reserved area; optional, use a
 *	  pointer to 0 for any placement.
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @name: Optional label used to look the area up from the device tree.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas for specific
 * devices.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
				       phys_addr_t limit, const char *name)
{
	phys_addr_t base = *res_base;
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma_areas[cma_area_count].base = base;
	cma_areas[cma_area_count].size = size;
	cma_areas[cma_area_count].name = name;
	cma_area_count++;
	*res_base = base;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

/**
 * dma_contiguous_add_device() - add device to custom contiguous reserved area
 * @dev: Pointer to device structure.
 * @base: Base address of the reserved area returned by
 *	  dma_contiguous_reserve_area().
 *
 * This function assigns the given device to the contiguous memory area
 * reserved earlier by dma_contiguous_reserve_area().
 */
int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base)
{
	if (cma_map_count == ARRAY_SIZE(cma_maps)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}
	cma_maps[cma_map_count].dev = dev;
	cma_maps[cma_map_count].base = base;
	cma_map_count++;
	return 0;
}
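
/*
 * Example pairing of the two calls above from early board/arch setup code
 * (a sketch only; the size, label and device below are hypothetical):
 *
 *	static struct platform_device example_cam_dev;
 *	phys_addr_t base = 0;
 *
 *	if (dma_contiguous_reserve_area(SZ_16M, &base, 0, "example-cam") == 0)
 *		dma_contiguous_add_device(&example_cam_dev.dev, base);
 *
 * The struct cma backing the area is created later, in
 * cma_init_reserved_areas(), and the device mapping is applied there too.
 */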

#ifdef CONFIG_OF
static void cma_assign_device_from_dt(struct device *dev)
{
	struct device_node *node;
	struct cma *cma;
	const char *name;
	u32 value;

	node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
	if (!node)
		return;
	if (of_property_read_u32(node, "reg", &value) && !value)
		return;

	if (of_property_read_string(node, "label", &name))
		return;

	cma = cma_get_area_by_name(name);
	if (!cma)
		return;

	dev_set_cma_area(dev, cma);
	pr_info("Assigned CMA region at %lx to %s device\n",
		(unsigned long)value, dev_name(dev));
}

static int cma_device_init_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct device *dev = data;
	if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
		cma_assign_device_from_dt(dev);
	return NOTIFY_DONE;
}

static struct notifier_block cma_dev_init_nb = {
	.notifier_call = cma_device_init_notifier_call,
};
#endif

static int __init cma_init_reserved_areas(void)
{
	struct cma *cma;
	int i;

	for (i = 0; i < cma_area_count; i++) {
		phys_addr_t base = PFN_DOWN(cma_areas[i].base);
		unsigned int count = cma_areas[i].size >> PAGE_SHIFT;

		cma = cma_create_area(base, count);
		if (!IS_ERR(cma))
			cma_areas[i].cma = cma;
	}

	dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);

	for (i = 0; i < cma_map_count; i++) {
		cma = cma_get_area(cma_maps[i].base);
		dev_set_cma_area(cma_maps[i].dev, cma);
	}

#ifdef CONFIG_OF
	bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
#endif
	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;
	int tries = 0;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		tries++;
		trace_dma_alloc_contiguous_retry(tries);

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	mutex_lock(&cma_mutex);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	free_contig_range(pfn, count);
	mutex_unlock(&cma_mutex);

	return true;
}
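
/*
 * Typical use from an arch DMA allocator (a sketch under assumed context;
 * "dev", "size" and "order" come from the caller and error handling is
 * elided):
 *
 *	int count = size >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, order);
 *	if (!page)
 *		return NULL;
 *	...
 *	dma_release_from_contiguous(dev, page, count);
 */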