/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __PMEM_H__
#define __PMEM_H__

#include <linux/io.h>
#include <linux/uio.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
#include <asm/pmem.h>
#else
static inline void arch_wmb_pmem(void)
{
	BUG();
}

static inline bool arch_has_wmb_pmem(void)
{
	return false;
}

static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t n)
{
	BUG();
}

static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	BUG();
	return 0;
}

static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
	BUG();
}
#endif

/*
 * Architectures that define ARCH_HAS_PMEM_API must provide
 * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
 * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
 */

static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
{
	memcpy(dst, (void __force const *) src, size);
}

static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
{
	devm_memunmap(dev, (void __force *) addr);
}

/**
 * arch_has_pmem_api - true if wmb_pmem() ensures durability
 *
 * For a given cpu implementation within an architecture it is possible
 * that wmb_pmem() resolves to a nop.  When this returns false, pmem
 * api users are unable to ensure durability and may want to fall back
 * to a different data consistency model, or otherwise notify the user.
 */
static inline bool arch_has_pmem_api(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
}
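
/*
 * Example (illustrative sketch only, with a hypothetical 'dev'): a
 * driver that needs durability guarantees might check
 * arch_has_pmem_api() at bind time and notify the user when only the
 * default, possibly insufficient, flush path is available:
 *
 *	if (!arch_has_pmem_api())
 *		dev_warn(dev, "unable to guarantee persistence of writes\n");
 */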

/*
 * These defaults seek to offer decent performance and minimize the
 * window between i/o completion and writes being durable on media.
 * However, it is undefined / architecture specific whether the
 * default memremap_pmem() mapping + default_memcpy_to_pmem() is
 * sufficient for making data durable relative to i/o completion.
 */
static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t size)
{
	memcpy((void __force *) dst, src, size);
}

static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
		size_t bytes, struct iov_iter *i)
{
	return copy_from_iter_nocache((void __force *)addr, bytes, i);
}

static inline void default_clear_pmem(void __pmem *addr, size_t size)
{
	if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
		clear_page((void __force *)addr);
	else
		memset((void __force *)addr, 0, size);
}

/**
 * memremap_pmem - map physical persistent memory for pmem api
 * @dev: device backing the devres-managed mapping
 * @offset: physical address of persistent memory
 * @size: size of the mapping
 *
 * Establish a mapping of the architecture specific memory type expected
 * by memcpy_to_pmem() and wmb_pmem().  For example, it may be the case
 * that an uncacheable or writethrough mapping is sufficient, or a
 * writeback mapping provided memcpy_to_pmem() and wmb_pmem() arrange
 * for the data to be written through the cache to persistent media.
 */
static inline void __pmem *memremap_pmem(struct device *dev,
		resource_size_t offset, unsigned long size)
{
#ifdef ARCH_MEMREMAP_PMEM
	return (void __pmem *) devm_memremap(dev, offset, size,
			ARCH_MEMREMAP_PMEM);
#else
	return (void __pmem *) devm_memremap(dev, offset, size,
			MEMREMAP_WT);
#endif
}
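
/*
 * Example (sketch, assuming a hypothetical driver with 'res' and
 * 'pmem' locals): mapping a persistent memory range at probe time;
 * the mapping is devres-managed, and memunmap_pmem() gives an
 * explicit early release if needed:
 *
 *	pmem->virt_addr = memremap_pmem(dev, res->start,
 *			resource_size(res));
 *	if (!pmem->virt_addr)
 *		return ERR_PTR(-ENXIO);
 */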

/**
 * memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Perform a memory copy that results in the destination of the copy
 * being effectively evicted from, or never written to, the processor
 * cache hierarchy after the copy completes.  After memcpy_to_pmem()
 * data may still reside in cpu or platform buffers, so this operation
 * must be followed by a wmb_pmem().
 */
static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
{
	if (arch_has_pmem_api())
		arch_memcpy_to_pmem(dst, src, n);
	else
		default_memcpy_to_pmem(dst, src, n);
}

/**
 * wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of memcpy_to_pmem() operations this drains data from
 * cpu write buffers and any platform (memory controller) buffers to
 * ensure that written data is durable on persistent memory media.
 */
static inline void wmb_pmem(void)
{
	if (arch_has_pmem_api())
		arch_wmb_pmem();
}
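
/*
 * Example (sketch, with hypothetical 'virt', 'buf' and 'len'): the
 * expected write sequence is one or more copies followed by a single
 * flush of cpu and platform buffers:
 *
 *	memcpy_to_pmem(virt, buf, len);
 *	wmb_pmem();
 */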

/**
 * copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr: PMEM destination address
 * @bytes: number of bytes to copy
 * @i: iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 * This function requires explicit ordering with a wmb_pmem() call.
 */
static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	if (arch_has_pmem_api())
		return arch_copy_from_iter_pmem(addr, bytes, i);
	return default_copy_from_iter_pmem(addr, bytes, i);
}
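
/*
 * Example (sketch, with hypothetical 'virt', 'bytes' and an 'iter'
 * describing the source data): copying an i/o payload into pmem,
 * again ordered by wmb_pmem():
 *
 *	size_t copied = copy_from_iter_pmem(virt, bytes, iter);
 *
 *	wmb_pmem();
 */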

/**
 * clear_pmem - zero a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 * This function requires explicit ordering with a wmb_pmem() call.
 */
static inline void clear_pmem(void __pmem *addr, size_t size)
{
	if (arch_has_pmem_api())
		arch_clear_pmem(addr, size);
	else
		default_clear_pmem(addr, size);
}
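
/*
 * Example (sketch, hypothetical 'virt' and 'blk'): zeroing a
 * page-aligned range, e.g. when punching out a bad block, with the
 * same wmb_pmem() ordering requirement:
 *
 *	clear_pmem(virt + blk * PAGE_SIZE, PAGE_SIZE);
 *	wmb_pmem();
 */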
#endif /* __PMEM_H__ */