/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>

struct cma {
	unsigned long base_pfn;
	unsigned long count;
	unsigned long *bitmap;
	unsigned int order_per_bit; /* Order of pages represented by one bit */
	struct mutex lock;
};

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
	return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
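
/*
 * Worked example of the bitmap helpers above (illustrative only), assuming
 * order_per_bit == 2, i.e. one bitmap bit covers 4 pages, and an area of
 * count == 64 pages:
 *
 *	cma_bitmap_maxno(cma)            = 64 >> 2            = 16 bits
 *	cma_bitmap_pages_to_bits(cma, 9) = ALIGN(9, 4) >> 2    = 3 bits
 *	cma_bitmap_aligned_mask(cma, 4)  = (1 << (4 - 2)) - 1  = 3
 */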

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 int order_per_bit, struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;

	return 0;
}
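
/*
 * Illustrative usage sketch (not part of the original file), assuming a
 * hypothetical early-boot hook that has already set aside a suitably
 * aligned region with memblock_reserve(). The example_* names are made up.
 */
#if 0	/* usage sketch only, not compiled */
static struct cma *example_cma;

static int __init example_register_reserved_region(phys_addr_t base,
						   phys_addr_t size)
{
	int ret;

	/* base/size must already be memblock-reserved and properly aligned */
	ret = cma_init_reserved_mem(base, size, 0, &example_cma);
	if (ret)
		pr_warn("example: cma_init_reserved_mem() failed: %d\n", ret);
	return ret;
}
#endif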

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows one to create custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate. But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to get
	 * the physical address from it. On x86 there is a validation check for
	 * this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pages by the page allocator's buddy algorithm.
	 * In that case you could not get contiguous memory, which is not what
	 * we want.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
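
/*
 * Illustrative usage sketch (not part of the original file), assuming a
 * hypothetical architecture hook called during early setup, after memblock
 * is available but before the buddy allocator takes over. The example_*
 * names and the 16 MiB size are made up.
 */
#if 0	/* usage sketch only, not compiled */
static struct cma *example_area;

static void __init example_arch_reserve_cma(void)
{
	/* base 0, limit 0, alignment 0: let CMA pick placement and alignment */
	if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false, &example_area))
		pr_warn("example: failed to reserve 16 MiB CMA area\n");
}
#endif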

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
	unsigned long mask, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);

	return true;
}
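
/*
 * Illustrative sketch of the allocate/release pair (not part of the original
 * file), assuming a caller that holds a struct cma pointer obtained from one
 * of the reservation helpers above. The example_* names and the 1 MiB size
 * are made up.
 */
#if 0	/* usage sketch only, not compiled */
static int example_use_cma(struct cma *example_area)
{
	int nr_pages = SZ_1M >> PAGE_SHIFT;
	struct page *pages;

	/* request 1 MiB of physically contiguous pages, page-aligned */
	pages = cma_alloc(example_area, nr_pages, 0);
	if (!pages)
		return -ENOMEM;

	/* ... use the buffer, e.g. hand it to a device for DMA ... */

	/* return the pages to the CMA bitmap */
	if (!cma_release(example_area, pages, nr_pages))
		pr_warn("example: pages do not belong to this CMA area\n");

	return 0;
}
#endif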