/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	struct page *page = pfn_to_page(offset >> PAGE_SHIFT);

	/* In the simple case just return the existing linear address */
	if (!PageHighMem(page))
		return __va(offset);
	return NULL; /* fallback to ioremap_cache */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for "System RAM" on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region
 * is RAM, memremap() will bypass establishing a new mapping and instead
 * return a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map "System RAM" with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size, "System RAM");
	void *addr = NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		flags &= ~MEMREMAP_WB;
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in "System RAM".
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = ioremap_cache(offset, size);
	}

	/*
	 * If we don't have a mapping yet and more request flags are
	 * pending then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * "System RAM".
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT)) {
		flags &= ~MEMREMAP_WT;
		addr = ioremap_wt(offset, size);
	}

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
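
/*
 * Usage sketch (illustrative only, hence the #if 0): a caller that
 * wants a cacheable view of a firmware-reserved range pairs
 * memremap()/memunmap(). The function and parameter names below are
 * hypothetical, not part of this file.
 */
#if 0
static int example_parse_table(resource_size_t table_phys, size_t table_len)
{
	void *tbl = memremap(table_phys, table_len, MEMREMAP_WB);

	if (!tbl)
		return -ENOMEM;
	/* ... read the table through the cacheable alias ... */
	memunmap(tbl);
	return 0;
}
#endif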

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
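
/*
 * Usage sketch (illustrative only): devm_memremap() is typically
 * called from a driver probe path so the mapping is torn down by
 * devres on detach. The probe function and platform resource here
 * are hypothetical. Note this snapshot returns NULL when memremap()
 * itself fails and an ERR_PTR only on devres allocation failure.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void *base;

	if (!res)
		return -ENXIO;
	base = devm_memremap(&pdev->dev, res->start, resource_size(res),
			MEMREMAP_WB);
	if (IS_ERR_OR_NULL(base))
		return base ? PTR_ERR(base) : -ENOMEM;
	/* ... use @base; devm_memremap_release() unmaps it on detach ... */
	return 0;
}
#endif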

pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
{
	return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}
EXPORT_SYMBOL(phys_to_pfn_t);
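
/*
 * Usage sketch (illustrative only): a DAX-style driver building a
 * pfn_t for insertion into userspace page tables would tag the
 * physical address with pfn_t flags from <linux/pfn_t.h>, e.g.
 * PFN_DEV plus PFN_MAP when a struct page backs the pfn.
 */
#if 0
static pfn_t example_dax_pfn(phys_addr_t dev_addr)
{
	return phys_to_pfn_t(dev_addr, PFN_DEV | PFN_MAP);
}
#endif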

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

void get_zone_device_page(struct page *page)
{
	percpu_ref_get(page->pgmap->ref);
}
EXPORT_SYMBOL(get_zone_device_page);

void put_zone_device_page(struct page *page)
{
	put_dev_pagemap(page->pgmap);
}
EXPORT_SYMBOL(put_zone_device_page);
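
/*
 * Usage sketch (illustrative only): every elevated reference on a
 * ZONE_DEVICE page pins the hosting device's percpu_ref, so get/put
 * must stay balanced or devm_memremap_pages_release() will warn that
 * the page mapping is still live.
 */
#if 0
static void example_pin_page(struct page *page)
{
	get_zone_device_page(page);	/* takes page->pgmap->ref */
	/* ... access the device page ... */
	put_zone_device_page(page);	/* drops the pgmap reference */
}
#endif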

static void pgmap_radix_release(struct resource *res)
{
	resource_size_t key, align_start, align_size, align_end;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	align_end = align_start + align_size - 1;

	mutex_lock(&pgmap_lock);
	for (key = res->start; key <= res->end; key += SECTION_SIZE)
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&pgmap_lock);
}

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	arch_remove_memory(align_start, align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	return page_map ? &page_map->pgmap : NULL;
}
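
/*
 * Usage sketch (illustrative only) of the lookup discipline this
 * function expects: resolve the pgmap under rcu_read_lock(), then
 * take a percpu_ref before dropping the rcu read lock, since the
 * devres release path may be concurrently tearing the mapping down.
 */
#if 0
static struct dev_pagemap *example_get_pagemap(resource_size_t phys)
{
	struct dev_pagemap *pgmap;

	rcu_read_lock();
	pgmap = find_dev_pagemap(phys);
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;	/* balance with put_dev_pagemap() when done */
}
#endif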

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	int is_ram = region_intersects(res->start, resource_size(res),
			"System RAM");
	resource_size_t key, align_start, align_size, align_end;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	unsigned long pfn;
	int error, nid;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (altmap && !IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) {
		dev_err(dev, "%s: altmap requires CONFIG_SPARSEMEM_VMEMMAP=y\n",
				__func__);
		return ERR_PTR(-ENXIO);
	}

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = arch_add_memory(nid, align_start, align_size, true);
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/* ZONE_DEVICE pages must never appear on a slab lru */
		list_force_poison(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
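
/*
 * Usage sketch (illustrative only): a pmem-style driver initializes a
 * live percpu_ref, hands the region to devm_memremap_pages(), and is
 * responsible for killing the ref (and waiting for it to drain)
 * before the devres release fires. All names here are hypothetical.
 */
#if 0
static void example_ref_release(struct percpu_ref *ref)
{
	/* all ZONE_DEVICE page references for this region are gone */
}

static void *example_map_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref)
{
	void *base;

	if (percpu_ref_init(ref, example_ref_release, 0, GFP_KERNEL))
		return ERR_PTR(-ENOMEM);

	/* @ref must be live on entry, killed before device teardown */
	base = devm_memremap_pages(dev, res, ref, NULL);
	if (IS_ERR(base))
		percpu_ref_exit(ref);
	return base;
}
#endif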

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
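
/*
 * Usage sketch (illustrative only, field semantics assumed from the
 * helpers above): a caller that reserves the head of its range and
 * lets the memmap be allocated out of device memory would describe
 * that with a vmem_altmap before calling devm_memremap_pages().
 */
#if 0
static struct vmem_altmap example_altmap(struct resource *res,
		unsigned long nr_reserved, unsigned long nr_free)
{
	struct vmem_altmap altmap = {
		.base_pfn = res->start >> PAGE_SHIFT,
		.reserve = nr_reserved,	/* pfns never handed out */
		.free = nr_free,	/* pool for memmap allocation */
	};

	return altmap;
}
#endif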

#ifdef CONFIG_SPARSEMEM_VMEMMAP
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array. In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address; this is only for use in the
	 * arch_{add|remove}_memory() paths that set up and tear down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#endif /* CONFIG_ZONE_DEVICE */
Christoph Hellwig41e94a82015-08-17 16:00:35 +0200405#endif /* CONFIG_ZONE_DEVICE */