/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
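/* Serialises alloc_contig_range() calls issued on behalf of any CMA area. */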
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

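/*
 * Mask passed to bitmap_find_next_zero_area_off() so that the area it
 * finds is aligned to 1 << align_order pages; each bitmap bit covers
 * 1 << order_per_bit pages.
 */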
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the first PFN in the area that is aligned to 1 << align_order
 * pages and return its offset from base_pfn, in order_per_bit units.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}

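/*
 * Convert a page count to the number of bitmap bits it occupies, rounded
 * up to a whole number of order_per_bit chunks.
 */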
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

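/*
 * Clear the bitmap bits covering [pfn, pfn + count), returning those
 * pages to the area's free pool. cma->lock guards the bitmap against
 * concurrent cma_alloc() and cma_release() callers.
 */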
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

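/*
 * Activate a reserved area: allocate the allocation bitmap, verify that
 * every page lies in the same zone, and hand each pageblock over to the
 * page allocator as MIGRATE_CMA. Runs once per area from a core_initcall,
 * after the slab allocator is available.
 */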
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area; should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows callers to create custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
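 *
 * Illustrative example (not part of the original source): arch setup code
 * could reserve 16 MiB anywhere in memory with
 *
 *	struct cma *cma;
 *	int ret = cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false, &cma);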
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory, so retrieving its physical
	 * address isn't appropriate. But it is useful to check the physical
	 * address of the highmem boundary, so it's justifiable to get the
	 * physical address from it. On x86 there is a validation check for
	 * this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm, in which case the area would no longer yield the
	 * contiguous memory we want. Align the area to avoid that.
	 */
	alignment = max(alignment,
		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base, the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped or accessible.
		 */
		kmemleak_ignore(phys_to_virt(addr));
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates pages from a specific contiguous memory area.
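 *
 * Illustrative example (not part of the original source): allocate
 * sixteen pages aligned to a sixteen-page boundary, then free them:
 *
 *	struct page *page = cma_alloc(cma, 16, 4);
 *	if (page)
 *		cma_release(cma, page, 16);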
 */
struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
{
	unsigned long mask, offset, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(page ? pfn : -1UL, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}