/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
        return ioremap(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
        struct page *page = pfn_to_page(offset >> PAGE_SHIFT);

        /* In the simple case just return the existing linear address */
        if (!PageHighMem(page))
                return __va(offset);
        return NULL; /* fallback to ioremap_cache */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for "System RAM" on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map "System RAM" with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
        int is_ram = region_intersects(offset, size, "System RAM");
        void *addr = NULL;

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        /* Try all mapping types requested until one returns non-NULL */
        if (flags & MEMREMAP_WB) {
                flags &= ~MEMREMAP_WB;
                /*
                 * MEMREMAP_WB is special in that it can be satisfied
                 * from the direct map.  Some archs depend on the
                 * capability of memremap() to autodetect cases where
                 * the requested range is potentially in "System RAM".
                 */
                if (is_ram == REGION_INTERSECTS)
                        addr = try_ram_remap(offset, size);
                if (!addr)
                        addr = ioremap_cache(offset, size);
        }

        /*
         * If we don't have a mapping yet and more request flags are
         * pending then we will be attempting to establish a new virtual
         * address mapping.  Enforce that this mapping is not aliasing
         * "System RAM".
         */
        if (!addr && is_ram == REGION_INTERSECTS && flags) {
                WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        if (!addr && (flags & MEMREMAP_WT)) {
                flags &= ~MEMREMAP_WT;
                addr = ioremap_wt(offset, size);
        }

        return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
        if (is_vmalloc_addr(addr))
                iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
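
/*
 * Example usage (an illustrative sketch, not part of the original file;
 * "res" stands for a hypothetical non-RAM struct resource):
 *
 *      void *base = memremap(res->start, resource_size(res),
 *                      MEMREMAP_WB | MEMREMAP_WT);
 *
 *      if (!base)
 *              return -ENXIO;
 *      ...
 *      memunmap(base);
 *
 * Passing both flags requests a write-back mapping first with
 * write-through as the fallback, per the flag-stripping logic above.
 * The returned pointer carries no __iomem annotation and may be
 * dereferenced directly.
 */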

static void devm_memremap_release(struct device *dev, void *res)
{
        memunmap(res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
        return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
{
        void **ptr, *addr;

        ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
                        dev_to_node(dev));
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        addr = memremap(offset, size, flags);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else
                devres_free(ptr);

        return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
        WARN_ON(devres_release(dev, devm_memremap_release,
                                devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
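
/*
 * Example (a hypothetical probe routine, for illustration only): the
 * device-managed variant ties the mapping's lifetime to the device, so
 * no explicit devm_memunmap() is needed on error or remove paths:
 *
 *      void *base = devm_memremap(dev, res->start, resource_size(res),
 *                      MEMREMAP_WB);
 *
 *      if (IS_ERR_OR_NULL(base))
 *              return base ? PTR_ERR(base) : -ENXIO;
 *
 * Note the two failure shapes at this point in the code: an ERR_PTR()
 * when the devres allocation fails, and NULL when memremap() itself
 * fails.
 */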

pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
{
        return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}
EXPORT_SYMBOL(phys_to_pfn_t);
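
/*
 * Example (illustrative, assuming the caller wants to mark device
 * memory that is backed by a struct page): a driver might build a
 * pfn_t for its fault path as
 *
 *      pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);
 *
 * The PFN_* flags occupy high bits of pfn_t.val, so the value still
 * round-trips to a plain pfn via pfn_t_to_pfn().
 */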

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
        struct resource res;
        struct percpu_ref *ref;
        struct dev_pagemap pgmap;
};

static void pgmap_radix_release(struct resource *res)
{
        resource_size_t key;

        mutex_lock(&pgmap_lock);
        for (key = res->start; key <= res->end; key += SECTION_SIZE)
                radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
        mutex_unlock(&pgmap_lock);
}

static void devm_memremap_pages_release(struct device *dev, void *data)
{
        struct page_map *page_map = data;
        struct resource *res = &page_map->res;
        resource_size_t align_start, align_size;

        pgmap_radix_release(res);

        /* pages are dead and unused, undo the arch mapping */
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
        arch_remove_memory(align_start, align_size);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
        struct page_map *page_map;

        WARN_ON_ONCE(!rcu_read_lock_held());

        page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
        return page_map ? &page_map->pgmap : NULL;
}
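
/*
 * Illustrative arithmetic for the radix keying: entries are indexed by
 * physical address shifted down by PA_SECTION_SHIFT.  With 128MB
 * sections (PA_SECTION_SHIFT == 27, as on x86_64), a 256MB resource
 * starting at 0x100000000 spans two sections and is registered under
 * keys 0x100000000 >> 27 == 32 and 33, so any physical address in the
 * range resolves to its page_map in a single lookup.
 */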

void *devm_memremap_pages(struct device *dev, struct resource *res)
{
        int is_ram = region_intersects(res->start, resource_size(res),
                        "System RAM");
        resource_size_t key, align_start, align_size;
        struct page_map *page_map;
        int error, nid;

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "%s attempted on mixed region %pr\n",
                                __func__, res);
                return ERR_PTR(-ENXIO);
        }

        if (is_ram == REGION_INTERSECTS)
                return __va(res->start);

        page_map = devres_alloc_node(devm_memremap_pages_release,
                        sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
        if (!page_map)
                return ERR_PTR(-ENOMEM);

        memcpy(&page_map->res, res, sizeof(*res));

        page_map->pgmap.dev = dev;
        mutex_lock(&pgmap_lock);
        error = 0;
        for (key = res->start; key <= res->end; key += SECTION_SIZE) {
                struct dev_pagemap *dup;

                rcu_read_lock();
                dup = find_dev_pagemap(key);
                rcu_read_unlock();
                if (dup) {
                        dev_err(dev, "%s: %pr collides with mapping for %s\n",
                                        __func__, res, dev_name(dup->dev));
                        error = -EBUSY;
                        break;
                }
                error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
                                page_map);
                if (error) {
                        dev_err(dev, "%s: failed: %d\n", __func__, error);
                        break;
                }
        }
        mutex_unlock(&pgmap_lock);
        if (error)
                goto err_radix;

        nid = dev_to_node(dev);
        if (nid < 0)
                nid = numa_mem_id();

        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
        error = arch_add_memory(nid, align_start, align_size, true);
        if (error)
                goto err_add_memory;

        devres_add(dev, page_map);
        return __va(res->start);

 err_add_memory:
 err_radix:
        pgmap_radix_release(res);
        devres_free(page_map);
        return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
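
/*
 * Example (a hypothetical sketch of a pmem-style caller): handing a
 * reserved, non-RAM region to devm_memremap_pages() gives the range
 * struct pages so it can back DAX and DMA:
 *
 *      void *base = devm_memremap_pages(dev, res);
 *
 *      if (IS_ERR(base))
 *              return PTR_ERR(base);
 *
 * The return value is the direct-map (__va) address of res->start; the
 * hotplugged ZONE_DEVICE sections and the radix entries are torn down
 * automatically when the device is released.
 */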
#endif /* CONFIG_ZONE_DEVICE */