#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	const unsigned long base_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
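
/*
 * Note on the helpers above: in the default implementation
 * (kernel/memremap.c as of this header's vintage), vmem_altmap_offset()
 * returns the number of pfns from @base_pfn at which pfn_to_page()
 * becomes valid, i.e. @reserve + @free, and vmem_altmap_free() hands
 * @nr_pfns back to the mapping by decrementing @alloc.
 */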

#ifdef CONFIG_ZONE_DEVICE
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
#else
static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	return NULL;
}
#endif

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @res: physical address range covered by @ref
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @dev: host device of the mapping for debug
 */
struct dev_pagemap {
	struct vmem_altmap *altmap;
	const struct resource *res;
	struct percpu_ref *ref;
	struct device *dev;
};

#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap);
struct dev_pagemap *find_dev_pagemap(resource_size_t phys);
#else
static inline void *devm_memremap_pages(struct device *dev,
		struct resource *res, struct percpu_ref *ref,
		struct vmem_altmap *altmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled; callers must fall back to
	 * plain devm_memremap() based on config.
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	return NULL;
}
#endif
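
/*
 * Illustrative usage sketch, not part of this API: a hypothetical driver
 * probe path that remaps a device's memory range.  The function name
 * example_probe and the way @res and @ref are obtained are assumptions
 * for illustration; see drivers/nvdimm/pmem.c for a real user.  @ref
 * must be live on entry and killed by the caller before the mapping is
 * torn down.
 *
 *	static int example_probe(struct device *dev, struct resource *res,
 *			struct percpu_ref *ref)
 *	{
 *		void *addr = devm_memremap_pages(dev, res, ref, NULL);
 *
 *		if (IS_ERR(addr))
 *			return PTR_ERR(addr);
 *		return 0;
 *	}
 */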

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a dev_pagemap for
 * @pgmap: optional known pgmap that already has a reference
 *
 * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
 * same mapping.
 */
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	const struct resource *res = pgmap ? pgmap->res : NULL;
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference so
	 * we can simply do a blind increment
	 */
	if (res && phys >= res->start && phys <= res->end) {
		percpu_ref_get(pgmap->ref);
		return pgmap;
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = find_dev_pagemap(phys);
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(pgmap->ref);
}
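
/*
 * Illustrative pairing sketch, not part of this API; @pfn below stands
 * for any pfn that may land in a ZONE_DEVICE range:
 *
 *	struct dev_pagemap *pgmap;
 *
 *	pgmap = get_dev_pagemap(pfn, NULL);
 *	if (pgmap) {
 *		(access the device page while the reference is held)
 *		put_dev_pagemap(pgmap);
 *	}
 *
 * On a subsequent call, passing the previously returned @pgmap skips
 * the find_dev_pagemap() lookup when the new pfn falls in the same
 * range; note the fast path still takes a fresh reference, so every
 * successful get_dev_pagemap() must be balanced by a put_dev_pagemap().
 */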
#endif /* _LINUX_MEMREMAP_H_ */