/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
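
/*
 * Worked example of the three bitmap helpers above (illustrative numbers
 * only, not taken from any real configuration): with
 * cma->order_per_bit == 2, each bitmap bit covers 2^2 == 4 pages.
 *
 *	cma_bitmap_aligned_mask(cma, 4)   == (1UL << (4 - 2)) - 1 == 3
 *	cma_bitmap_aligned_offset(cma, 4) == (base_pfn & 15) >> 2
 *		(== 1 if, say, base_pfn == 0x12345)
 *	cma_bitmap_pages_to_bits(cma, 10) == ALIGN(10, 4) >> 2    == 3
 */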

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
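
/*
 * Illustrative usage sketch (hypothetical: "rmem_base", "rmem_size" and
 * the "rmem" name are assumptions for the example, not part of this
 * file). Early platform code that has already memblock_reserve()d a
 * suitably aligned region can hand it over to CMA like this:
 *
 *	struct cma *rmem_cma;
 *	int ret;
 *
 *	ret = cma_init_reserved_mem(rmem_base, rmem_size, 0, "rmem",
 *				    &rmem_cma);
 *	if (ret)
 *		pr_warn("rmem: CMA init failed: %d\n", ret);
 *
 * Note that base and size must already satisfy the alignment checked
 * above, and the region must be memblock-reserved beforehand.
 */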

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the buddy allocator. In that case,
	 * the allocation would not be contiguous, which is not what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);
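	/*
	 * Illustrative numbers only (hypothetical configuration): with
	 * 4 KiB pages and MAX_ORDER == 11, the minimum alignment computed
	 * above is PAGE_SIZE << 10 == 4 MiB, so base, size and limit are
	 * all rounded to 4 MiB boundaries before any reservation happens.
	 */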

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base, the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
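
/*
 * Illustrative usage sketch (hypothetical: the 64 MiB size and the
 * "camera" name are assumptions for the example, not part of this file).
 * Arch setup code could reserve an area anywhere in memory like this:
 *
 *	struct cma *camera_cma;
 *	int ret;
 *
 *	ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *				     "camera", &camera_cma);
 *	if (ret)
 *		pr_warn("camera: CMA reservation failed: %d\n", ret);
 *
 * Passing 0 for @base, @limit and @alignment lets the code above pick
 * the placement and the minimum alignment itself.
 */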

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
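
/*
 * For illustration, cma_debug_show_areas() emits output of the form
 * below (the numbers are hypothetical, derived only from the format
 * strings above): two free runs of 16 and 8 bits starting at bit
 * offsets 0 and 40 would print as
 *
 *	cma: number of available pages: 16@0+8@40=> 24 free of 64 total pages
 *
 * i.e. "<run>@<bit offset>" terms joined by '+', followed by a summary.
 */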

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during compaction.
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
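
/*
 * Illustrative usage sketch (hypothetical: "my_cma" is assumed to have
 * been set up by one of the declaration helpers above). A typical
 * allocate/use/free cycle looks like:
 *
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, 16, 0, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *
 *	// ... use the 16 contiguous pages starting at "page" ...
 *
 *	cma_release(my_cma, page, 16);
 *
 * The @count passed to cma_release() must match the @count used for the
 * allocation, or the bitmap accounting will be corrupted.
 */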

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
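
/*
 * Example iterator callback (hypothetical, for illustration only):
 *
 *	static int dump_area(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;	// a non-zero return stops the walk
 *	}
 *
 *	cma_for_each_area(dump_area, NULL);
 */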