/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset from the base PFN to the next PFN aligned to
 * align_order.  The returned value is expressed in order_per_bit units.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}
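
/*
 * Worked example (illustrative values only): with base_pfn = 0x2f800,
 * align_order = 12 and order_per_bit = 0, ALIGN(0x2f800, 1 << 12) is
 * 0x30000, so the offset is (0x30000 - 0x2f800) >> 0 = 0x800 bits.
 * Passing this offset to the bitmap search below keeps allocations
 * aligned to 1 << align_order pages in absolute PFNs, not merely
 * relative to base_pfn.
 */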

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
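
/*
 * Example usage (an illustrative sketch only; the caller and the names
 * below are hypothetical, not part of this file): a boot-time path that
 * has already reserved a block aligned to the mm core requirement above
 * can wrap it in a CMA region like this.
 *
 *	static struct cma *example_cma;
 *
 *	static int __init example_cma_setup(phys_addr_t base, phys_addr_t size)
 *	{
 *		int err;
 *
 *		err = memblock_reserve(base, size);
 *		if (err)
 *			return err;
 *
 *		err = cma_init_reserved_mem(base, size, 0, &example_cma);
 *		if (err)
 *			memblock_free(base, size);
 *		return err;
 *	}
 */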

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate. But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to get
	 * the physical address from it. On x86 there is a validation check for
	 * this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pages by the page allocator's buddy algorithm.
	 * In that case you could not get contiguous memory back, which is not
	 * what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible, so
		 * tell kmemleak to ignore it.
		 */
		kmemleak_ignore(phys_to_virt(addr));
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
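
/*
 * Example usage (an illustrative sketch; the function name and size are
 * hypothetical): architecture setup code would typically call this once
 * memblock is up, here asking for 16 MiB anywhere in memory rather than
 * at a fixed base.
 *
 *	static struct cma *example_area;
 *
 *	void __init example_reserve_cma(void)
 *	{
 *		if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0,
 *					   false, &example_area))
 *			pr_warn("example CMA reservation failed\n");
 *	}
 */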

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a contiguous run of pages from the given
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
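
/*
 * Example usage (an illustrative sketch; "example_area" is the
 * hypothetical region reserved in the sketch above): allocate a 1 MiB
 * buffer that is also 1 MiB aligned.
 *
 *	struct page *pages;
 *
 *	pages = cma_alloc(example_area, SZ_1M >> PAGE_SHIFT, get_order(SZ_1M));
 *	if (!pages)
 *		return -ENOMEM;
 */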

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
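
/*
 * Example usage (an illustrative sketch, pairing with the hypothetical
 * cma_alloc() example above): the pages pointer and the page count must
 * match the earlier allocation, and the return value tells the caller
 * whether the pages actually came from this CMA region.
 *
 *	if (!cma_release(example_area, pages, SZ_1M >> PAGE_SHIFT))
 *		pr_warn("pages did not come from example_area\n");
 */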