/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <trace/events/kmem.h>

#ifndef SZ_1M
#define SZ_1M (1 << 20)
#endif

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
};

static DEFINE_MUTEX(cma_mutex);

struct cma *dma_contiguous_def_area;
phys_addr_t dma_contiguous_def_base;

static struct cma_area {
	phys_addr_t base;
	unsigned long size;
	struct cma *cma;
} cma_areas[MAX_CMA_AREAS] __initdata;
static unsigned cma_area_count __initdata;


static struct cma_map {
	phys_addr_t base;
	struct device *dev;
} cma_maps[MAX_CMA_AREAS] __initdata;
static unsigned cma_map_count __initdata;

static struct cma *cma_get_area(phys_addr_t base)
{
	int i;
	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].base == base)
			return cma_areas[i].cma;
	return NULL;
}

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	return 0;
}
early_param("cma", early_cma);

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
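
/*
 * Worked example (assumed typical values, not taken from this file): with
 * CONFIG_CMA_SIZE_PERCENTAGE=10 and 1 GiB of memblock memory,
 * cma_early_percent_memory() returns total_pages * 10 / 100 pages,
 * i.e. roughly 100 MiB once shifted back into bytes by PAGE_SHIFT.
 */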

static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
	return 0;
}

static __init struct cma *cma_create_area(unsigned long base_pfn,
					  unsigned long count)
{
	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct cma *cma;
	int ret = -ENOMEM;

	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

	cma = kmalloc(sizeof *cma, GFP_KERNEL);
	if (!cma)
		return ERR_PTR(-ENOMEM);

	cma->base_pfn = base_pfn;
	cma->count = count;
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		goto no_mem;

	ret = cma_activate_area(base_pfn, count);
	if (ret)
		goto error;

	pr_debug("%s: returned %p\n", __func__, (void *)cma);
	return cma;

error:
	kfree(cma->bitmap);
no_mem:
	kfree(cma);
	return ERR_PTR(ret);
}
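
/*
 * Sizing note (illustrative): the bitmap tracks one bit per page, so a
 * 16 MiB area with 4 KiB pages has count == 4096 and its bitmap occupies
 * BITS_TO_LONGS(4096) * sizeof(long) == 512 bytes.
 */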

/*****************************************************************************/

#ifdef CONFIG_OF
int __init cma_fdt_scan(unsigned long node, const char *uname,
			int depth, void *data)
{
	phys_addr_t base, size;
	unsigned long len;
	__be32 *prop;

	if (strncmp(uname, "region@", 7) != 0 || depth != 2 ||
	    !of_get_flat_dt_prop(node, "contiguous-region", NULL))
		return 0;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop || (len != 2 * sizeof(unsigned long)))
		return 0;

	base = be32_to_cpu(prop[0]);
	size = be32_to_cpu(prop[1]);

	pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
		(unsigned long)base, (unsigned long)size / SZ_1M);
	dma_contiguous_reserve_area(size, &base, 0);

	return 0;
}
#endif
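
/*
 * Illustrative device-tree fragment (a sketch; the exact binding is defined
 * by the platform documentation, and the addresses below are made up):
 * cma_fdt_scan() matches a depth-2 node whose name starts with "region@",
 * carrying a "contiguous-region" property and a <base size> "reg" pair, e.g.
 *
 *	memory {
 *		reg = <0x40000000 0x20000000>;
 *
 *		region@51000000 {
 *			contiguous-region;
 *			reg = <0x51000000 0x01000000>;	// 16 MiB at 0x51000000
 *		};
 *	};
 */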

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It reserves contiguous areas for global, device independent
 * allocations and (optionally) all areas defined in device tree structures.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t sel_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		sel_size = size_cmdline;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		sel_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		sel_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		sel_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		sel_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (sel_size) {
		phys_addr_t base = 0;
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)sel_size / SZ_1M);

		if (dma_contiguous_reserve_area(sel_size, &base, limit) == 0)
			dma_contiguous_def_base = base;
	}
#ifdef CONFIG_OF
	of_scan_flat_dt(cma_fdt_scan, NULL);
#endif
}
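
/*
 * Illustrative call site (a sketch, not taken from this tree): architecture
 * setup code is expected to call dma_contiguous_reserve() once memblock is
 * up, passing a limit that keeps the area DMA-addressable, e.g.
 *
 *	void __init board_reserve_memory(void)		// hypothetical helper
 *	{
 *		dma_contiguous_reserve(arm_dma_limit);	// or 0 for no limit
 *	}
 */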

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @res_base: Pointer to the base address of the reserved area; also used to
 *        return the base address of the area actually reserved. Optional,
 *        pass a pointer to 0 for any placement.
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for specific
 * devices.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
				       phys_addr_t limit)
{
	phys_addr_t base = *res_base;
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);
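	/*
	 * Example (assuming a common configuration with 4 KiB pages and
	 * MAX_ORDER == 11): max(MAX_ORDER - 1, pageblock_order) == 10, so
	 * base, size and limit all get rounded to 4 MiB boundaries here.
	 */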

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma_areas[cma_area_count].base = base;
	cma_areas[cma_area_count].size = size;
	cma_area_count++;
	*res_base = base;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

/**
 * dma_contiguous_add_device() - add device to custom contiguous reserved area
 * @dev:  Pointer to device structure.
 * @base: Base address of the reserved area returned by the
 *        dma_contiguous_reserve_area() function.
 *
 * This function assigns the given device to the contiguous memory area
 * reserved earlier by the dma_contiguous_reserve_area() function.
 */
int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base)
{
	if (cma_map_count == ARRAY_SIZE(cma_maps)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}
	cma_maps[cma_map_count].dev = dev;
	cma_maps[cma_map_count].base = base;
	cma_map_count++;
	return 0;
}
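
/*
 * Illustrative board-level usage (a sketch; device and variable names are
 * hypothetical): reserve a private area early, remember its base, then bind
 * it to a device so cma_init_reserved_areas() can wire the two together:
 *
 *	static phys_addr_t codec_base __initdata = 0x51000000;
 *
 *	void __init board_reserve(void)
 *	{
 *		if (dma_contiguous_reserve_area(SZ_16M, &codec_base, 0) == 0)
 *			dma_contiguous_add_device(&codec_device.dev, codec_base);
 *	}
 */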

#ifdef CONFIG_OF
static void cma_assign_device_from_dt(struct device *dev)
{
	struct device_node *node;
	struct cma *cma;
	u32 value;

	node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
	if (!node)
		return;
	if (of_property_read_u32(node, "reg", &value) && !value)
		return;
	cma = cma_get_area(value);
	if (!cma)
		return;

	dev_set_cma_area(dev, cma);
	pr_info("Assigned CMA region at %lx to %s device\n",
		(unsigned long)value, dev_name(dev));
}

static int cma_device_init_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct device *dev = data;
	if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
		cma_assign_device_from_dt(dev);
	return NOTIFY_DONE;
}

static struct notifier_block cma_dev_init_nb = {
	.notifier_call = cma_device_init_notifier_call,
};
#endif
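
/*
 * Illustrative device-tree consumer (a sketch; labels and addresses are made
 * up): a device references one of the regions declared above through a
 * "linux,contiguous-region" phandle, and the notifier above assigns the
 * matching CMA area when the platform device is added:
 *
 *	codec_region: region@51000000 {
 *		contiguous-region;
 *		reg = <0x51000000 0x01000000>;
 *	};
 *
 *	video-codec@13400000 {
 *		linux,contiguous-region = <&codec_region>;
 *	};
 */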

static int __init cma_init_reserved_areas(void)
{
	struct cma *cma;
	int i;

	for (i = 0; i < cma_area_count; i++) {
		phys_addr_t base = PFN_DOWN(cma_areas[i].base);
		unsigned int count = cma_areas[i].size >> PAGE_SHIFT;

		cma = cma_create_area(base, count);
		if (!IS_ERR(cma))
			cma_areas[i].cma = cma;
	}

	dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);

	for (i = 0; i < cma_map_count; i++) {
		cma = cma_get_area(cma_maps[i].base);
		dev_set_cma_area(cma_maps[i].dev, cma);
	}

#ifdef CONFIG_OF
	bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
#endif
	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available, or the default
 * global one. Requires the architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;
	int tries = 0;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		tries++;
		trace_dma_alloc_contiguous_retry(tries);

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
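
/*
 * Illustrative caller (a sketch; buffer size and names are hypothetical):
 * a DMA backend asking for a naturally aligned 1 MiB buffer would do
 * something like
 *
 *	int nr = PAGE_ALIGN(SZ_1M) >> PAGE_SHIFT;
 *	struct page *pages;
 *
 *	pages = dma_alloc_from_contiguous(dev, nr, get_order(SZ_1M));
 *	if (!pages)
 *		return NULL;	// fall back to another allocator if desired
 */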

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	mutex_lock(&cma_mutex);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	free_contig_range(pfn, count);
	mutex_unlock(&cma_mutex);

	return true;
}
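
/*
 * Illustrative release path (a sketch, pairing with the allocation example
 * above): the caller passes back the same page pointer and page count, and
 * can use the return value to tell CMA pages from ones obtained elsewhere:
 *
 *	if (!dma_release_from_contiguous(dev, pages, nr))
 *		__free_pages(pages, get_order(SZ_1M));	// not a CMA buffer
 */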