/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2 license
 * terms, and distributes only under these terms.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <trace/events/kmem.h>

#ifndef SZ_1M
#define SZ_1M (1 << 20)
#endif

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	bool		in_system;
	struct mutex	lock;
};

static DEFINE_MUTEX(cma_mutex);

struct cma *dma_contiguous_def_area;
phys_addr_t dma_contiguous_def_base;

static struct cma_area {
	phys_addr_t base;
	unsigned long size;
	struct cma *cma;
	const char *name;
	bool to_system;
} cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static struct cma_map {
	phys_addr_t base;
	struct device *dev;
} cma_maps[MAX_CMA_AREAS] __initdata;
static unsigned cma_map_count __initdata;

static struct cma *cma_get_area(phys_addr_t base)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].base == base)
			return cma_areas[i].cma;
	return NULL;
}

static struct cma *cma_get_area_by_name(const char *name)
{
	int i;

	if (!name)
		return NULL;

	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].name && strcmp(cma_areas[i].name, name) == 0)
			return cma_areas[i].cma;
	return NULL;
}

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * Default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	return 0;
}
early_param("cma", early_cma);

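/*
 * Example (illustrative, not part of the original file): booting with the
 * parameter below makes the early_cma() handler above reserve a 64 MiB
 * global area; memparse() also accepts K/M/G suffixes:
 *
 *	cma=64M
 */
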
#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
	return 0;
}

static __init struct cma *cma_create_area(unsigned long base_pfn,
					  unsigned long count, bool system)
{
	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct cma *cma;
	int ret = -ENOMEM;

	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

	cma = kmalloc(sizeof *cma, GFP_KERNEL);
	if (!cma)
		return ERR_PTR(-ENOMEM);

	cma->base_pfn = base_pfn;
	cma->count = count;
	cma->in_system = system;
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		goto no_mem;

	if (cma->in_system) {
		ret = cma_activate_area(base_pfn, count);
		if (ret)
			goto error;
	}
	mutex_init(&cma->lock);

	pr_debug("%s: returned %p\n", __func__, (void *)cma);
	return cma;

error:
	kfree(cma->bitmap);
no_mem:
	kfree(cma);
	return ERR_PTR(ret);
}

/*****************************************************************************/

#ifdef CONFIG_OF
int __init cma_fdt_scan(unsigned long node, const char *uname,
			int depth, void *data)
{
	phys_addr_t base, size;
	unsigned long len;
	__be32 *prop;
	char *name;
	bool in_system;
	phys_addr_t limit = MEMBLOCK_ALLOC_ANYWHERE;

	if (!of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))
		return 0;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop || (len != 2 * sizeof(unsigned long)))
		return 0;

	base = be32_to_cpu(prop[0]);
	size = be32_to_cpu(prop[1]);

	name = of_get_flat_dt_prop(node, "label", NULL);
	in_system =
		of_get_flat_dt_prop(node, "linux,reserve-region", NULL) ? 0 : 1;

	prop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (prop)
		limit = be32_to_cpu(prop[0]);

	pr_info("Found %s, memory base %lx, size %ld MiB, limit %pa\n", uname,
		(unsigned long)base, (unsigned long)size / SZ_1M, &limit);
	dma_contiguous_reserve_area(size, &base, limit, name, in_system);

	return 0;
}
#endif
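
/*
 * Example (illustrative): a flat device-tree node that the scanner above
 * would match; the node and label names are hypothetical:
 *
 *	multimedia_region: region@0 {
 *		linux,contiguous-region;
 *		reg = <0x0 0x1000000>;
 *		label = "multimedia";
 *	};
 */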

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It reserves contiguous areas for global, device-independent
 * allocations and (optionally) all areas defined in the device tree.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t sel_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		sel_size = size_cmdline;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		sel_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		sel_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		sel_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		sel_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (sel_size) {
		phys_addr_t base = 0;

		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)sel_size / SZ_1M);

		if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL,
						true) == 0)
			dma_contiguous_def_base = base;
	}
#ifdef CONFIG_OF
	of_scan_flat_dt(cma_fdt_scan, NULL);
#endif
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @res_base: Pointer to the base address of the reserved area, also used
 *	to return the base address of the actually reserved area. Use a
 *	pointer to 0 for any placement.
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @name: Name of the area (optional), used to match it from the device tree.
 * @to_system: True to hand the area's pages to the page allocator as
 *	MIGRATE_CMA pageblocks, false to keep them out of the buddy
 *	allocator entirely.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas for specific
 * devices.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
				       phys_addr_t limit, const char *name,
				       bool to_system)
{
	phys_addr_t base = *res_base;
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);

		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma_areas[cma_area_count].base = base;
	cma_areas[cma_area_count].size = size;
	cma_areas[cma_area_count].name = name;
	cma_areas[cma_area_count].to_system = to_system;
	cma_area_count++;
	*res_base = base;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

/**
 * dma_contiguous_add_device() - add device to custom contiguous reserved area
 * @dev: Pointer to device structure.
 * @base: Base address of the reserved area returned by the
 *	dma_contiguous_reserve_area() function.
 *
 * This function assigns the given device to the contiguous memory area
 * reserved earlier by the dma_contiguous_reserve_area() function.
 */
int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base)
{
	if (cma_map_count == ARRAY_SIZE(cma_maps)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}
	cma_maps[cma_map_count].dev = dev;
	cma_maps[cma_map_count].base = base;
	cma_map_count++;
	return 0;
}
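
/*
 * Example (illustrative): early arch code could pair the two calls above
 * to give one device a dedicated area; "camera_dev" is hypothetical and
 * error handling is elided:
 *
 *	phys_addr_t base = 0;
 *
 *	if (dma_contiguous_reserve_area(SZ_8M, &base, 0, "camera", true) == 0)
 *		dma_contiguous_add_device(&camera_dev, base);
 */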

#ifdef CONFIG_OF
static void cma_assign_device_from_dt(struct device *dev)
{
	struct device_node *node;
	struct cma *cma;
	const char *name;
	u32 value;

	node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
	if (!node)
		return;
	if (of_property_read_u32(node, "reg", &value) && !value)
		return;

	if (of_property_read_string(node, "label", &name))
		return;

	cma = cma_get_area_by_name(name);
	if (!cma)
		return;

	dev_set_cma_area(dev, cma);
	pr_info("Assigned CMA region at %lx to %s device\n",
		(unsigned long)value, dev_name(dev));
}

static int cma_device_init_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct device *dev = data;

	if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
		cma_assign_device_from_dt(dev);
	return NOTIFY_DONE;
}

static struct notifier_block cma_dev_init_nb = {
	.notifier_call = cma_device_init_notifier_call,
};
#endif
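
/*
 * Example (illustrative): a client device node referring to the labelled
 * region from the earlier example, so the bus notifier above can assign
 * the area on device creation; names are hypothetical:
 *
 *	camera@fd000000 {
 *		linux,contiguous-region = <&multimedia_region>;
 *	};
 */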

static int __init cma_init_reserved_areas(void)
{
	struct cma *cma;
	int i;

	for (i = 0; i < cma_area_count; i++) {
		phys_addr_t base = PFN_DOWN(cma_areas[i].base);
		unsigned int count = cma_areas[i].size >> PAGE_SHIFT;
		bool system = cma_areas[i].to_system;

		cma = cma_create_area(base, count, system);
		if (!IS_ERR(cma))
			cma_areas[i].cma = cma;
	}

	dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);

	for (i = 0; i < cma_map_count; i++) {
		cma = cma_get_area(cma_maps[i].base);
		dev_set_cma_area(cma_maps[i].dev, cma);
	}

#ifdef CONFIG_OF
	bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
#endif
	return 0;
}
core_initcall(cma_init_reserved_areas);

phys_addr_t cma_get_base(struct device *dev)
{
	struct cma *cma = dev_get_cma_area(dev);

	return cma->base_pfn << PAGE_SHIFT;
}

static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	mutex_unlock(&cma->lock);
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret = 0;
	int tries = 0;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	for (;;) {
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region
		 * for our exclusive use. If the migration fails we will take
		 * the lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;
		mutex_lock(&cma_mutex);
		if (cma->in_system)
			ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			/*
			 * Clear the bitmap while pfn still addresses the
			 * failed range, then reset pfn.
			 */
			clear_cma_bitmap(cma, pfn, count);
			pfn = 0;
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		tries++;
		trace_dma_alloc_contiguous_retry(tries);

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	if (cma->in_system)
		free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}
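
/*
 * Example (illustrative): a driver could allocate and free a 16-page,
 * page-aligned buffer with the pair of calls above; "dev" is assumed to
 * already have a CMA area assigned and error handling is minimal:
 *
 *	struct page *page = dma_alloc_from_contiguous(dev, 16, 0);
 *
 *	if (page)
 *		dma_release_from_contiguous(dev, page, 16);
 */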