/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2 license
 * terms, and distributes only under these terms.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <trace/events/kmem.h>

#ifndef SZ_1M
#define SZ_1M (1 << 20)
#endif

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
};
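
/*
 * Note: base_pfn/count above describe the reserved range in page frames,
 * and the bitmap tracks allocations at single-page granularity, one bit
 * per page (see dma_alloc_from_contiguous() below).
 */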

static DEFINE_MUTEX(cma_mutex);

struct cma *dma_contiguous_def_area;
phys_addr_t dma_contiguous_def_base;

static struct cma_area {
	phys_addr_t base;
	unsigned long size;
	struct cma *cma;
} cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static struct cma_map {
	phys_addr_t base;
	struct device *dev;
} cma_maps[MAX_CMA_AREAS] __initdata;
static unsigned cma_map_count __initdata;

static struct cma *cma_get_area(phys_addr_t base)
{
	int i;
	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].base == base)
			return cma_areas[i].cma;
	return NULL;
}

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	return 0;
}
early_param("cma", early_cma);
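
/*
 * Example (illustrative): booting with "cma=64M" makes memparse() store
 * 64 MiB in size_cmdline, which then overrides the Kconfig-selected default
 * in dma_contiguous_reserve() below. memparse() accepts K/M/G suffixes.
 */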

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
	return 0;
}

static __init struct cma *cma_create_area(unsigned long base_pfn,
					  unsigned long count)
{
	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct cma *cma;
	int ret = -ENOMEM;

	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

	cma = kmalloc(sizeof *cma, GFP_KERNEL);
	if (!cma)
		return ERR_PTR(-ENOMEM);

	cma->base_pfn = base_pfn;
	cma->count = count;
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		goto no_mem;

	ret = cma_activate_area(base_pfn, count);
	if (ret)
		goto error;

	pr_debug("%s: returned %p\n", __func__, (void *)cma);
	return cma;

error:
	kfree(cma->bitmap);
no_mem:
	kfree(cma);
	return ERR_PTR(ret);
}

/*****************************************************************************/

#ifdef CONFIG_OF
int __init cma_fdt_scan(unsigned long node, const char *uname,
			int depth, void *data)
{
	phys_addr_t base, size;
	unsigned long len;
	__be32 *prop;

	if (strncmp(uname, "region@", 7) != 0 || depth != 2 ||
	    !of_get_flat_dt_prop(node, "contiguous-region", NULL))
		return 0;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop || (len != 2 * sizeof(unsigned long)))
		return 0;

	base = be32_to_cpu(prop[0]);
	size = be32_to_cpu(prop[1]);

	pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
		(unsigned long)base, (unsigned long)size / SZ_1M);
	dma_contiguous_reserve_area(size, &base, 0);

	return 0;
}
#endif
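
/*
 * Illustrative flattened device tree fragment matched by cma_fdt_scan()
 * (node, label and parent names are examples; the scanner only checks for a
 * "region@" node at depth 2 carrying a "contiguous-region" property):
 *
 *	multimedia-memory {
 *		mm_region: region@77000000 {
 *			contiguous-region;
 *			reg = <0x77000000 0x4000000>;	(64 MiB)
 *		};
 *	};
 */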

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It reserves contiguous areas for global, device independent
 * allocations and (optionally) all areas defined in the device tree.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t sel_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		sel_size = size_cmdline;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		sel_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		sel_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		sel_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		sel_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (sel_size) {
		phys_addr_t base = 0;
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)sel_size / SZ_1M);

		if (dma_contiguous_reserve_area(sel_size, &base, limit) == 0)
			dma_contiguous_def_base = base;
	}
#ifdef CONFIG_OF
	of_scan_flat_dt(cma_fdt_scan, NULL);
#endif
}
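
/*
 * Example (illustrative): architecture setup code typically calls this once
 * memblock is ready, e.g. from its *_memblock_init() path:
 *
 *	dma_contiguous_reserve(dma_limit);
 *
 * where dma_limit is an assumed per-arch physical address limit for DMA-able
 * memory (0 if any placement is acceptable).
 */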

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Pointer to the base address of the reserved area, also used to
 *	  return the base address of the actually reserved area; optional,
 *	  use a pointer to 0 for any placement.
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas for specific
 * devices.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
				       phys_addr_t limit)
{
	phys_addr_t base = *res_base;
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma_areas[cma_area_count].base = base;
	cma_areas[cma_area_count].size = size;
	cma_area_count++;
	*res_base = base;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
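
/*
 * Example (illustrative): board or arch code reserving a 16 MiB area at a
 * fixed physical address during early boot. The address is an assumption
 * made only for the sake of the example:
 *
 *	phys_addr_t base = 0x88000000;
 *
 *	if (dma_contiguous_reserve_area(SZ_16M, &base, 0))
 *		pr_warn("example: CMA reservation at %pa failed\n", &base);
 */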

/**
 * dma_contiguous_add_device() - add device to custom contiguous reserved area
 * @dev: Pointer to device structure.
 * @base: Base address of the reserved area returned by the
 *	  dma_contiguous_reserve_area() function.
 *
 * This function assigns the given device to the contiguous memory area
 * reserved earlier by the dma_contiguous_reserve_area() function.
 */
int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base)
{
	if (cma_map_count == ARRAY_SIZE(cma_maps)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}
	cma_maps[cma_map_count].dev = dev;
	cma_maps[cma_map_count].base = base;
	cma_map_count++;
	return 0;
}
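
/*
 * Example (illustrative): binding a platform device to the area reserved in
 * the previous example; "example_pdev" is a hypothetical platform device
 * defined elsewhere in board code:
 *
 *	dma_contiguous_add_device(&example_pdev.dev, base);
 *
 * The mapping is only recorded here; the device's CMA area is actually set
 * later by cma_init_reserved_areas().
 */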

#ifdef CONFIG_OF
static void cma_assign_device_from_dt(struct device *dev)
{
	struct device_node *node;
	struct cma *cma;
	u32 value;

	node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
	if (!node)
		return;
	if (of_property_read_u32(node, "reg", &value))
		return;
	cma = cma_get_area(value);
	if (!cma)
		return;

	dev_set_cma_area(dev, cma);
	pr_info("Assigned CMA region at %lx to %s device\n",
		(unsigned long)value, dev_name(dev));
}

static int cma_device_init_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct device *dev = data;
	if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
		cma_assign_device_from_dt(dev);
	return NOTIFY_DONE;
}

static struct notifier_block cma_dev_init_nb = {
	.notifier_call = cma_device_init_notifier_call,
};
#endif
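
/*
 * Illustrative consumer node for the notifier above, referring to a region
 * declared as in the cma_fdt_scan() example (names are assumptions):
 *
 *	video-codec@12340000 {
 *		compatible = "vendor,example-codec";
 *		linux,contiguous-region = <&mm_region>;
 *	};
 *
 * When such a platform device is added, cma_assign_device_from_dt() reads the
 * first cell of the referenced region's "reg" property and attaches the
 * matching CMA area to the device.
 */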

static int __init cma_init_reserved_areas(void)
{
	struct cma *cma;
	int i;

	for (i = 0; i < cma_area_count; i++) {
		phys_addr_t base = PFN_DOWN(cma_areas[i].base);
		unsigned int count = cma_areas[i].size >> PAGE_SHIFT;

		cma = cma_create_area(base, count);
		if (!IS_ERR(cma))
			cma_areas[i].cma = cma;
	}

	dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);

	for (i = 0; i < cma_map_count; i++) {
		cma = cma_get_area(cma_maps[i].base);
		dev_set_cma_area(cma_maps[i].dev, cma);
	}

#ifdef CONFIG_OF
	bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
#endif
	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available, or the default
 * global one. Requires an architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;
	int tries = 0;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		tries++;
		trace_dma_alloc_contiguous_retry(tries);

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to a contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	mutex_lock(&cma_mutex);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	free_contig_range(pfn, count);
	mutex_unlock(&cma_mutex);

	return true;
}
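
/*
 * Example (illustrative) of a caller allocating and freeing a 1 MiB buffer
 * through the two helpers above; "dev" is assumed to be a device with a CMA
 * area (device specific or the global default):
 *
 *	int count = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(SZ_1M));
 *	if (page) {
 *		void *vaddr = page_address(page);	(lowmem assumption)
 *		...
 *		dma_release_from_contiguous(dev, page, count);
 *	}
 */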