/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2 license
 * terms, and distributes only under these terms.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <trace/events/kmem.h>

#ifndef SZ_1M
#define SZ_1M (1 << 20)
#endif

struct cma {
        unsigned long base_pfn;
        unsigned long count;
        unsigned long *bitmap;
        struct mutex lock;
};

static DEFINE_MUTEX(cma_mutex);

struct cma *dma_contiguous_def_area;
phys_addr_t dma_contiguous_def_base;

static struct cma_area {
        phys_addr_t base;
        unsigned long size;
        struct cma *cma;
        const char *name;
} cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;


static struct cma_map {
        phys_addr_t base;
        struct device *dev;
} cma_maps[MAX_CMA_AREAS] __initdata;
static unsigned cma_map_count __initdata;

static struct cma *cma_get_area(phys_addr_t base)
{
        int i;
        for (i = 0; i < cma_area_count; i++)
                if (cma_areas[i].base == base)
                        return cma_areas[i].cma;
        return NULL;
}

static struct cma *cma_get_area_by_name(const char *name)
{
        int i;
        if (!name)
                return NULL;

        for (i = 0; i < cma_area_count; i++)
                if (cma_areas[i].name && strcmp(cma_areas[i].name, name) == 0)
                        return cma_areas[i].cma;
        return NULL;
}

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;

static int __init early_cma(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        size_cmdline = memparse(p, &p);
        return 0;
}
early_param("cma", early_cma);

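/*
 * Example: booting with "cma=64M" overrides the Kconfig-selected size of the
 * global area.  memparse() accepts the usual K/M/G suffixes, so "cma=65536K"
 * and "cma=0x4000000" select the same 64 MiB.
 */
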
#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
        struct memblock_region *reg;
        unsigned long total_pages = 0;

        /*
         * We cannot use memblock_phys_mem_size() here, because
         * memblock_analyze() has not been called yet.
         */
        for_each_memblock(memory, reg)
                total_pages += memblock_region_memory_end_pfn(reg) -
                               memblock_region_memory_base_pfn(reg);

        return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
        return 0;
}

#endif

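/*
 * Activate one reserved range: every pageblock in [base_pfn, base_pfn + count)
 * must lie within a single zone; each block is handed to
 * init_cma_reserved_pageblock(), which marks it MIGRATE_CMA and returns its
 * pages to the page allocator.
 */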
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
        unsigned long pfn = base_pfn;
        unsigned i = count >> pageblock_order;
        struct zone *zone;

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));

        do {
                unsigned j;
                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                return -EINVAL;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);
        return 0;
}

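/*
 * Allocate and initialise the bookkeeping (allocation bitmap and lock) for
 * one reserved range and activate its pageblocks.  Called from
 * cma_init_reserved_areas() once the slab allocator is available.
 */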
static __init struct cma *cma_create_area(unsigned long base_pfn,
                                          unsigned long count)
{
        int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
        struct cma *cma;
        int ret = -ENOMEM;

        pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

        cma = kmalloc(sizeof *cma, GFP_KERNEL);
        if (!cma)
                return ERR_PTR(-ENOMEM);

        cma->base_pfn = base_pfn;
        cma->count = count;
        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

        if (!cma->bitmap)
                goto no_mem;

        ret = cma_activate_area(base_pfn, count);
        if (ret)
                goto error;
        mutex_init(&cma->lock);

        pr_debug("%s: returned %p\n", __func__, (void *)cma);
        return cma;

error:
        kfree(cma->bitmap);
no_mem:
        kfree(cma);
        return ERR_PTR(ret);
}

/*****************************************************************************/

#ifdef CONFIG_OF
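/*
 * cma_fdt_scan() walks the flattened device tree and turns every node that
 * carries the "linux,contiguous-region" property into a reserved CMA area.
 * A minimal sketch of such a node (the node name, label and numbers below
 * are illustrative only, not taken from a real board file):
 *
 *	multimedia_region: region@0 {
 *		linux,contiguous-region;
 *		reg = <0x0 0x1000000>;
 *		label = "multimedia";
 *		linux,memory-limit = <0x40000000>;
 *	};
 *
 * "reg" supplies the base and size (a base of 0 lets memblock place the
 * region anywhere below the optional "linux,memory-limit"), and "label"
 * names the area so that devices can be bound to it later.
 */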
int __init cma_fdt_scan(unsigned long node, const char *uname,
                        int depth, void *data)
{
        phys_addr_t base, size;
        unsigned long len;
        __be32 *prop;
        char *name;
        phys_addr_t limit = MEMBLOCK_ALLOC_ANYWHERE;

        if (!of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))
                return 0;

        prop = of_get_flat_dt_prop(node, "reg", &len);
        if (!prop || (len != 2 * sizeof(unsigned long)))
                return 0;

        base = be32_to_cpu(prop[0]);
        size = be32_to_cpu(prop[1]);

        name = of_get_flat_dt_prop(node, "label", NULL);

        prop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
        if (prop)
                limit = be32_to_cpu(prop[0]);

        pr_info("Found %s, memory base %lx, size %ld MiB, limit %pa\n", uname,
                (unsigned long)base, (unsigned long)size / SZ_1M, &limit);
        dma_contiguous_reserve_area(size, &base, limit, name);

        return 0;
}
#endif

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It reserves contiguous areas for global, device-independent
 * allocations and (optionally) all areas defined in the device tree.
 * An illustrative usage sketch follows this function.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
        phys_addr_t sel_size = 0;

        pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

        if (size_cmdline != -1) {
                sel_size = size_cmdline;
        } else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
                sel_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
                sel_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
                sel_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
                sel_size = max(size_bytes, cma_early_percent_memory());
#endif
        }

        if (sel_size) {
                phys_addr_t base = 0;
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         (unsigned long)sel_size / SZ_1M);

                if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL)
                    == 0)
                        dma_contiguous_def_base = base;
        }
#ifdef CONFIG_OF
        of_scan_flat_dt(cma_fdt_scan, NULL);
#endif
}

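/*
 * Illustrative usage sketch: an architecture calls dma_contiguous_reserve()
 * once from its early memory setup, after memblock is ready and before the
 * page allocator is initialised.  arch_memblock_init() and arch_dma_limit
 * below are placeholders for the arch-specific hook and DMA addressing
 * limit, not symbols defined by this file:
 *
 *	void __init arch_memblock_init(void)
 *	{
 *		...
 *		dma_contiguous_reserve(arch_dma_limit);
 *	}
 */
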
/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Pointer to the base address of the reserved area, also used to return
 *	  the base address of the actually reserved area; optional, use a
 *	  pointer to 0 for any.
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @name: Optional name used to look the area up later (e.g. by
 *	  cma_get_area_by_name()).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas for specific
 * devices. An illustrative usage sketch follows this function.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
                                       phys_addr_t limit, const char *name)
{
        phys_addr_t base = *res_base;
        phys_addr_t alignment;
        int ret = 0;

        pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
                 (unsigned long)size, (unsigned long)base,
                 (unsigned long)limit);

        /* Sanity checks */
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        /* Sanitise input arguments */
        alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        /* Reserve memory */
        if (base) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
                /*
                 * Use __memblock_alloc_base() since
                 * memblock_alloc_base() panic()s.
                 */
                phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
                if (!addr) {
                        ret = -ENOMEM;
                        goto err;
                } else {
                        base = addr;
                }
        }

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        cma_areas[cma_area_count].base = base;
        cma_areas[cma_area_count].size = size;
        cma_areas[cma_area_count].name = name;
        cma_area_count++;
        *res_base = base;

        pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
                (unsigned long)base);

        /* Architecture specific contiguous memory fixup. */
        dma_contiguous_early_fixup(base, size);
        return 0;
err:
        pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return ret;
}

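/*
 * Illustrative usage sketch: early platform code could carve out a dedicated,
 * named area.  The 16 MiB size, the "camera" label and camera_cma_base are
 * assumptions of this example, not symbols defined by this file; a base of 0
 * lets memblock pick the address and a limit of 0 places no upper bound:
 *
 *	static phys_addr_t camera_cma_base __initdata;
 *
 *	void __init board_reserve_cma(void)
 *	{
 *		phys_addr_t base = 0;
 *
 *		if (dma_contiguous_reserve_area(SZ_16M, &base, 0,
 *						"camera") == 0)
 *			camera_cma_base = base;
 *	}
 *
 * The returned base can then be handed to dma_contiguous_add_device() to
 * bind a specific device to the area.
 */
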
/**
 * dma_contiguous_add_device() - add device to custom contiguous reserved area
 * @dev: Pointer to device structure.
 * @base: Base address of the reserved area, as returned by
 *	  dma_contiguous_reserve_area().
 *
 * This function assigns the given device to the contiguous memory area
 * reserved earlier by dma_contiguous_reserve_area(). An illustrative usage
 * sketch follows this function.
 */
int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base)
{
        if (cma_map_count == ARRAY_SIZE(cma_maps)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }
        cma_maps[cma_map_count].dev = dev;
        cma_maps[cma_map_count].base = base;
        cma_map_count++;
        return 0;
}

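/*
 * Illustrative usage sketch, continuing the example above: once an area has
 * been reserved, board code can route a device's CMA allocations to it
 * (camera_pdev and camera_cma_base are assumptions of the example):
 *
 *	dma_contiguous_add_device(&camera_pdev.dev, camera_cma_base);
 *
 * The device/area binding recorded here is resolved later, in
 * cma_init_reserved_areas().
 */
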
#ifdef CONFIG_OF
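/*
 * Device-tree path of the device/area binding: when a newly added device
 * carries a "linux,contiguous-region" phandle, look the referenced region up
 * by its "label" and make it the device's CMA area.
 */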
static void cma_assign_device_from_dt(struct device *dev)
{
        struct device_node *node;
        struct cma *cma;
        const char *name;
        u32 value;

        node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
        if (!node)
                return;
        if (of_property_read_u32(node, "reg", &value) && !value)
                return;

        if (of_property_read_string(node, "label", &name))
                return;

        cma = cma_get_area_by_name(name);
        if (!cma)
                return;

        dev_set_cma_area(dev, cma);
        pr_info("Assigned CMA region at %lx to %s device\n",
                (unsigned long)value, dev_name(dev));
}

static int cma_device_init_notifier_call(struct notifier_block *nb,
                                         unsigned long event, void *data)
{
        struct device *dev = data;
        if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
                cma_assign_device_from_dt(dev);
        return NOTIFY_DONE;
}

static struct notifier_block cma_dev_init_nb = {
        .notifier_call = cma_device_init_notifier_call,
};
#endif

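/*
 * Runs at core_initcall time, once the page and slab allocators are up:
 * create a struct cma for every area reserved during early boot, resolve the
 * default global area, apply the device/area maps recorded by
 * dma_contiguous_add_device() and, with CONFIG_OF, register the platform bus
 * notifier that handles the device-tree bindings.
 */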
static int __init cma_init_reserved_areas(void)
{
        struct cma *cma;
        int i;

        for (i = 0; i < cma_area_count; i++) {
                phys_addr_t base = PFN_DOWN(cma_areas[i].base);
                unsigned int count = cma_areas[i].size >> PAGE_SHIFT;

                cma = cma_create_area(base, count);
                if (!IS_ERR(cma))
                        cma_areas[i].cma = cma;
        }

        dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);

        for (i = 0; i < cma_map_count; i++) {
                cma = cma_get_area(cma_maps[i].base);
                dev_set_cma_area(cma_maps[i].dev, cma);
        }

#ifdef CONFIG_OF
        bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
#endif
        return 0;
}
core_initcall(cma_init_reserved_areas);

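/* Return the physical base address of the CMA area used by @dev. */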
phys_addr_t cma_get_base(struct device *dev)
{
        struct cma *cma = dev_get_cma_area(dev);

        return cma->base_pfn << PAGE_SHIFT;
}

static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
{
        mutex_lock(&cma->lock);
        bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
        mutex_unlock(&cma->lock);
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                       unsigned int align)
{
        unsigned long mask, pfn, pageno, start = 0;
        struct cma *cma = dev_get_cma_area(dev);
        struct page *page = NULL;
        int ret;
        int tries = 0;

        if (!cma || !cma->count)
                return NULL;

        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                return NULL;

        mask = (1 << align) - 1;

        for (;;) {
                mutex_lock(&cma->lock);
                pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
                                                    start, count, mask);
                if (pageno >= cma->count) {
                        mutex_unlock(&cma->lock);
                        break;
                }
                bitmap_set(cma->bitmap, pageno, count);
                /*
                 * It's safe to drop the lock here. We've marked this region for
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
                mutex_unlock(&cma->lock);

                pfn = cma->base_pfn + pageno;
                mutex_lock(&cma_mutex);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                mutex_unlock(&cma_mutex);
                if (ret == 0) {
                        page = pfn_to_page(pfn);
                        break;
                } else if (ret != -EBUSY) {
                        clear_cma_bitmap(cma, pfn, count);
                        break;
                }
                clear_cma_bitmap(cma, pfn, count);
                tries++;
                trace_dma_alloc_contiguous_retry(tries);

                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = pageno + mask + 1;
        }

        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise. An illustrative usage sketch follows this
 * function.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                 int count)
{
        struct cma *cma = dev_get_cma_area(dev);
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p)\n", __func__, (void *)pages);

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        free_contig_range(pfn, count);
        clear_cma_bitmap(cma, pfn, count);

        return true;
}
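
/*
 * Illustrative usage sketch: a driver needing a large physically contiguous
 * buffer could use the two helpers above roughly as follows (dev and the
 * 1 MiB size are assumptions of the example):
 *
 *	int nr_pages = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, nr_pages, get_order(SZ_1M));
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	dma_release_from_contiguous(dev, page, nr_pages);
 *
 * In practice most drivers reach this code indirectly, through the DMA
 * mapping API (e.g. dma_alloc_coherent()), rather than by calling these
 * helpers directly.
 */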