/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __PMEM_H__
#define __PMEM_H__

#include <linux/io.h>
#include <linux/uio.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
#include <asm/pmem.h>
#else
#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
/*
 * These are simply here to enable compilation, all call sites gate
 * calling these symbols with arch_has_pmem_api() and redirect to the
 * implementation in asm/pmem.h.
 */
/*
 * No arch pmem support is compiled in, so there is never a durable
 * write barrier available.
 */
static inline bool __arch_has_wmb_pmem(void)
{
	return false;
}

/* Stub: callers must gate on arch_has_pmem_api() before reaching here. */
static inline void arch_wmb_pmem(void)
{
	BUG();
}

| Ross Zwisler | 6103195 | 2015-06-25 03:08:39 -0400 | [diff] [blame] | 39 | static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, | 
|  | 40 | size_t n) | 
|  | 41 | { | 
|  | 42 | BUG(); | 
|  | 43 | } | 
| Ross Zwisler | 5de490d | 2015-08-18 13:55:39 -0600 | [diff] [blame] | 44 |  | 
|  | 45 | static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, | 
|  | 46 | struct iov_iter *i) | 
|  | 47 | { | 
|  | 48 | BUG(); | 
|  | 49 | return 0; | 
|  | 50 | } | 
|  | 51 |  | 
|  | 52 | static inline void arch_clear_pmem(void __pmem *addr, size_t size) | 
|  | 53 | { | 
|  | 54 | BUG(); | 
|  | 55 | } | 
#endif

/*
 * Architectures that define ARCH_HAS_PMEM_API must provide
 * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
 * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
 */
| Ross Zwisler | 6103195 | 2015-06-25 03:08:39 -0400 | [diff] [blame] | 63 | static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) | 
|  | 64 | { | 
|  | 65 | memcpy(dst, (void __force const *) src, size); | 
|  | 66 | } | 
|  | 67 |  | 
| Dan Williams | 96601ad | 2015-08-24 18:29:38 -0400 | [diff] [blame] | 68 | static inline bool arch_has_pmem_api(void) | 
|  | 69 | { | 
|  | 70 | return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API); | 
|  | 71 | } | 
|  | 72 |  | 
/**
 * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
 *
 * For a given cpu implementation within an architecture it is possible
 * that wmb_pmem() resolves to a nop.  In the case this returns
 * false, pmem api users are unable to ensure durability and may want to
 * fall back to a different data consistency model, or otherwise notify
 * the user.
 */
static inline bool arch_has_wmb_pmem(void)
{
	if (!arch_has_pmem_api())
		return false;
	return __arch_has_wmb_pmem();
}

|  | 87 | /* | 
|  | 88 | * These defaults seek to offer decent performance and minimize the | 
|  | 89 | * window between i/o completion and writes being durable on media. | 
|  | 90 | * However, it is undefined / architecture specific whether | 
| Dan Williams | a639315 | 2015-09-15 02:14:03 -0400 | [diff] [blame] | 91 | * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for | 
| Ross Zwisler | 6103195 | 2015-06-25 03:08:39 -0400 | [diff] [blame] | 92 | * making data durable relative to i/o completion. | 
|  | 93 | */ | 
| Dan Williams | e836a25 | 2015-08-12 18:42:56 -0400 | [diff] [blame] | 94 | static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src, | 
| Ross Zwisler | 6103195 | 2015-06-25 03:08:39 -0400 | [diff] [blame] | 95 | size_t size) | 
|  | 96 | { | 
|  | 97 | memcpy((void __force *) dst, src, size); | 
|  | 98 | } | 
|  | 99 |  | 
| Ross Zwisler | 5de490d | 2015-08-18 13:55:39 -0600 | [diff] [blame] | 100 | static inline size_t default_copy_from_iter_pmem(void __pmem *addr, | 
|  | 101 | size_t bytes, struct iov_iter *i) | 
|  | 102 | { | 
|  | 103 | return copy_from_iter_nocache((void __force *)addr, bytes, i); | 
|  | 104 | } | 
|  | 105 |  | 
|  | 106 | static inline void default_clear_pmem(void __pmem *addr, size_t size) | 
|  | 107 | { | 
|  | 108 | if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0) | 
|  | 109 | clear_page((void __force *)addr); | 
|  | 110 | else | 
|  | 111 | memset((void __force *)addr, 0, size); | 
|  | 112 | } | 
|  | 113 |  | 
| Ross Zwisler | 6103195 | 2015-06-25 03:08:39 -0400 | [diff] [blame] | 114 | /** | 
| Ross Zwisler | 6103195 | 2015-06-25 03:08:39 -0400 | [diff] [blame] | 115 | * memcpy_to_pmem - copy data to persistent memory | 
|  | 116 | * @dst: destination buffer for the copy | 
|  | 117 | * @src: source buffer for the copy | 
|  | 118 | * @n: length of the copy in bytes | 
|  | 119 | * | 
|  | 120 | * Perform a memory copy that results in the destination of the copy | 
|  | 121 | * being effectively evicted from, or never written to, the processor | 
|  | 122 | * cache hierarchy after the copy completes.  After memcpy_to_pmem() | 
|  | 123 | * data may still reside in cpu or platform buffers, so this operation | 
|  | 124 | * must be followed by a wmb_pmem(). | 
|  | 125 | */ | 
|  | 126 | static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n) | 
|  | 127 | { | 
|  | 128 | if (arch_has_pmem_api()) | 
|  | 129 | arch_memcpy_to_pmem(dst, src, n); | 
|  | 130 | else | 
|  | 131 | default_memcpy_to_pmem(dst, src, n); | 
|  | 132 | } | 
|  | 133 |  | 
/**
 * wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of memcpy_to_pmem() operations this drains data from
 * cpu write buffers and any platform (memory controller) buffers to
 * ensure that written data is durable on persistent memory media.
 */
static inline void wmb_pmem(void)
{
	if (arch_has_wmb_pmem()) {
		arch_wmb_pmem();
		return;
	}
	/* best effort: a plain write barrier when no arch primitive exists */
	wmb();
}

|  | 149 | /** | 
|  | 150 | * copy_from_iter_pmem - copy data from an iterator to PMEM | 
|  | 151 | * @addr:	PMEM destination address | 
|  | 152 | * @bytes:	number of bytes to copy | 
|  | 153 | * @i:		iterator with source data | 
|  | 154 | * | 
|  | 155 | * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'. | 
|  | 156 | * This function requires explicit ordering with a wmb_pmem() call. | 
|  | 157 | */ | 
|  | 158 | static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes, | 
|  | 159 | struct iov_iter *i) | 
|  | 160 | { | 
|  | 161 | if (arch_has_pmem_api()) | 
|  | 162 | return arch_copy_from_iter_pmem(addr, bytes, i); | 
|  | 163 | return default_copy_from_iter_pmem(addr, bytes, i); | 
|  | 164 | } | 
|  | 165 |  | 
|  | 166 | /** | 
|  | 167 | * clear_pmem - zero a PMEM memory range | 
|  | 168 | * @addr:	virtual start address | 
|  | 169 | * @size:	number of bytes to zero | 
|  | 170 | * | 
|  | 171 | * Write zeros into the memory range starting at 'addr' for 'size' bytes. | 
|  | 172 | * This function requires explicit ordering with a wmb_pmem() call. | 
|  | 173 | */ | 
|  | 174 | static inline void clear_pmem(void __pmem *addr, size_t size) | 
|  | 175 | { | 
|  | 176 | if (arch_has_pmem_api()) | 
|  | 177 | arch_clear_pmem(addr, size); | 
|  | 178 | else | 
|  | 179 | default_clear_pmem(addr, size); | 
|  | 180 | } | 
#endif /* __PMEM_H__ */