/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2 license
 * terms, and distributes only under these terms.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <trace/events/kmem.h>

#ifndef SZ_1M
#define SZ_1M (1 << 20)
#endif

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
};

static DEFINE_MUTEX(cma_mutex);

struct cma *dma_contiguous_def_area;
phys_addr_t dma_contiguous_def_base;

static struct cma_area {
	phys_addr_t base;
	unsigned long size;
	struct cma *cma;
	const char *name;
} cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static struct cma_map {
	phys_addr_t base;
	struct device *dev;
} cma_maps[MAX_CMA_AREAS] __initdata;
static unsigned cma_map_count __initdata;

static struct cma *cma_get_area(phys_addr_t base)
{
	int i;
	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].base == base)
			return cma_areas[i].cma;
	return NULL;
}

static struct cma *cma_get_area_by_name(const char *name)
{
	int i;
	if (!name)
		return NULL;

	for (i = 0; i < cma_area_count; i++)
		if (cma_areas[i].name && strcmp(cma_areas[i].name, name) == 0)
			return cma_areas[i].cma;
	return NULL;
}

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers who want to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	return 0;
}
early_param("cma", early_cma);
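
/*
 * Illustrative example (assumed, not from a specific platform): booting
 * with "cma=64M" on the kernel command line makes early_cma() set
 * size_cmdline to 64 MiB via memparse(), which then overrides the
 * Kconfig-selected default when dma_contiguous_reserve() picks the size
 * of the global area below.
 */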

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);
	return 0;
}

static __init struct cma *cma_create_area(unsigned long base_pfn,
					  unsigned long count)
{
	int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct cma *cma;
	int ret = -ENOMEM;

	pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

	cma = kmalloc(sizeof *cma, GFP_KERNEL);
	if (!cma)
		return ERR_PTR(-ENOMEM);

	cma->base_pfn = base_pfn;
	cma->count = count;
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		goto no_mem;

	ret = cma_activate_area(base_pfn, count);
	if (ret)
		goto error;

	pr_debug("%s: returned %p\n", __func__, (void *)cma);
	return cma;

error:
	kfree(cma->bitmap);
no_mem:
	kfree(cma);
	return ERR_PTR(ret);
}

/*****************************************************************************/

#ifdef CONFIG_OF
int __init cma_fdt_scan(unsigned long node, const char *uname,
			int depth, void *data)
{
	phys_addr_t base, size;
	unsigned long len;
	__be32 *prop;
	char *name;
	phys_addr_t limit = MEMBLOCK_ALLOC_ANYWHERE;

	if (!of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))
		return 0;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop || (len != 2 * sizeof(unsigned long)))
		return 0;

	base = be32_to_cpu(prop[0]);
	size = be32_to_cpu(prop[1]);

	name = of_get_flat_dt_prop(node, "label", NULL);

	prop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (prop)
		limit = be32_to_cpu(prop[0]);

	pr_info("Found %s, memory base %lx, size %ld MiB, limit %pa\n", uname,
		(unsigned long)base, (unsigned long)size / SZ_1M, &limit);
	dma_contiguous_reserve_area(size, &base, limit, name);

	return 0;
}
#endif
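
/*
 * A minimal sketch of the device tree wiring that cma_fdt_scan() above and
 * cma_assign_device_from_dt() below expect; the node names, addresses and
 * label are invented for illustration:
 *
 *	multimedia_mem: region@0 {
 *		linux,contiguous-region;
 *		reg = <0x0 0x4000000>;
 *		label = "multimedia";
 *	};
 *
 *	video-codec@fd600000 {
 *		compatible = "vendor,example-codec";
 *		linux,contiguous-region = <&multimedia_mem>;
 *	};
 *
 * The region node is reserved during of_scan_flat_dt() in
 * dma_contiguous_reserve(); the client device is attached to it later by
 * the platform bus notifier, which matches on the "label" string.
 */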

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It reserves contiguous areas for global, device independent
 * allocations and (optionally) all areas defined in the device tree.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t sel_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		sel_size = size_cmdline;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		sel_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		sel_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		sel_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		sel_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (sel_size) {
		phys_addr_t base = 0;
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)sel_size / SZ_1M);

		if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL)
		    == 0)
			dma_contiguous_def_base = base;
	}
#ifdef CONFIG_OF
	of_scan_flat_dt(cma_fdt_scan, NULL);
#endif
}
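
/*
 * A sketch of how architecture setup code might call the function above;
 * the call site and limit values are assumptions, not taken from any
 * particular arch:
 *
 *	void __init example_arch_memblock_init(void)
 *	{
 *		...
 *		dma_contiguous_reserve(min(dma_limit, lowmem_limit));
 *	}
 *
 * i.e. it runs once memblock is usable but before the page allocator takes
 * over, so the areas can still be carved out of free memory.
 */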

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @res_base: Pointer to the base address of the reserved area, also used to
 *        return the base address of the actually reserved area; optional,
 *        use a pointer to 0 for any.
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @name: Optional label for the area, used to match it against device tree
 *        regions.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows creating custom reserved areas for specific devices.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
				       phys_addr_t limit, const char *name)
{
	phys_addr_t base = *res_base;
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma_areas[cma_area_count].base = base;
	cma_areas[cma_area_count].size = size;
	cma_areas[cma_area_count].name = name;
	cma_area_count++;
	*res_base = base;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
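
/*
 * Illustrative use only (the name and sizes are hypothetical): board or
 * arch code could carve out a private 16 MiB area below 1 GiB and keep its
 * base for a later dma_contiguous_add_device() call:
 *
 *	phys_addr_t base = 0;
 *
 *	if (dma_contiguous_reserve_area(SZ_16M, &base, SZ_1G, "camera") == 0)
 *		pr_info("camera CMA at %pa\n", &base);
 */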

/**
 * dma_contiguous_add_device() - add device to custom contiguous reserved area
 * @dev: Pointer to device structure.
 * @base: Base address of the reserved area returned by
 *        dma_contiguous_reserve_area().
 *
 * This function assigns the given device to the contiguous memory area
 * reserved earlier by dma_contiguous_reserve_area().
 */
int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base)
{
	if (cma_map_count == ARRAY_SIZE(cma_maps)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}
	cma_maps[cma_map_count].dev = dev;
	cma_maps[cma_map_count].base = base;
	cma_map_count++;
	return 0;
}
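
/*
 * Sketch of the pairing with the reservation example above (the device is
 * assumed to come from board setup code):
 *
 *	dma_contiguous_add_device(&example_camera_pdev.dev, base);
 *
 * The actual per-device assignment happens later, from
 * cma_init_reserved_areas(), once the struct cma objects have been created.
 */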

#ifdef CONFIG_OF
static void cma_assign_device_from_dt(struct device *dev)
{
	struct device_node *node;
	struct cma *cma;
	const char *name;
	u32 value;

	node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
	if (!node)
		return;
	if (of_property_read_u32(node, "reg", &value) && !value)
		return;

	if (of_property_read_string(node, "label", &name))
		return;

	cma = cma_get_area_by_name(name);
	if (!cma)
		return;

	dev_set_cma_area(dev, cma);
	pr_info("Assigned CMA region at %lx to %s device\n",
		(unsigned long)value, dev_name(dev));
}

static int cma_device_init_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct device *dev = data;
	if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
		cma_assign_device_from_dt(dev);
	return NOTIFY_DONE;
}

static struct notifier_block cma_dev_init_nb = {
	.notifier_call = cma_device_init_notifier_call,
};
#endif

static int __init cma_init_reserved_areas(void)
{
	struct cma *cma;
	int i;

	for (i = 0; i < cma_area_count; i++) {
		phys_addr_t base = PFN_DOWN(cma_areas[i].base);
		unsigned int count = cma_areas[i].size >> PAGE_SHIFT;

		cma = cma_create_area(base, count);
		if (!IS_ERR(cma))
			cma_areas[i].cma = cma;
	}

	dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);

	for (i = 0; i < cma_map_count; i++) {
		cma = cma_get_area(cma_maps[i].base);
		dev_set_cma_area(cma_maps[i].dev, cma);
	}

#ifdef CONFIG_OF
	bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
#endif
	return 0;
}
core_initcall(cma_init_reserved_areas);

phys_addr_t cma_get_base(struct device *dev)
{
	struct cma *cma = dev_get_cma_area(dev);

	return cma->base_pfn << PAGE_SHIFT;
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available, or the default
 * global one. Requires the architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;
	int tries = 0;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		tries++;
		trace_dma_alloc_contiguous_retry(tries);

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
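
/*
 * A minimal, hypothetical caller (in practice the dma_alloc_coherent()
 * backends are the users): allocate a 1 MiB, 1 MiB-aligned buffer for a
 * device:
 *
 *	int count = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(SZ_1M));
 *	if (!page)
 *		return -ENOMEM;
 */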

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	mutex_lock(&cma_mutex);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	mutex_unlock(&cma_mutex);

	return true;
}
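
/*
 * Counterpart to the allocation sketch above: releasing the same buffer.
 * A false return means the pages did not come from a CMA area and must be
 * freed by whatever mechanism originally allocated them.
 *
 *	dma_release_from_contiguous(dev, page, count);
 */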