blob: a9d84bf335eeea080d045c110c28273d9e1f69e1 [file] [log] [blame]
Ross Zwisler61031952015-06-25 03:08:39 -04001/*
2 * Copyright(c) 2015 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#ifndef __PMEM_H__
14#define __PMEM_H__
15
16#include <linux/io.h>
Ross Zwisler5de490d2015-08-18 13:55:39 -060017#include <linux/uio.h>
Ross Zwisler61031952015-06-25 03:08:39 -040018
19#ifdef CONFIG_ARCH_HAS_PMEM_API
Ross Zwisler40603522015-08-18 13:55:36 -060020#include <asm/pmem.h>
Ross Zwisler61031952015-06-25 03:08:39 -040021#else
static inline void arch_wmb_pmem(void)
{
	/*
	 * Stub for !CONFIG_ARCH_HAS_PMEM_API: arch_has_pmem_api() returns
	 * false in this configuration, so callers must never reach here.
	 */
	BUG();
}
26
/* No arch pmem support compiled in: wmb_pmem() cannot guarantee durability. */
static inline bool arch_has_wmb_pmem(void)
{
	return false;
}
31
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
		size_t n)
{
	/* stub: unreachable while arch_has_pmem_api() returns false */
	BUG();
}
Ross Zwisler5de490d2015-08-18 13:55:39 -060037
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
		struct iov_iter *i)
{
	/* stub: unreachable while arch_has_pmem_api() returns false */
	BUG();
	return 0;	/* not reached; keeps the size_t signature well-formed */
}
44
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
	/* stub: unreachable while arch_has_pmem_api() returns false */
	BUG();
}
Ross Zwisler61031952015-06-25 03:08:39 -040049#endif
50
51/*
52 * Architectures that define ARCH_HAS_PMEM_API must provide
Ross Zwisler5de490d2015-08-18 13:55:39 -060053 * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
54 * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
Ross Zwisler61031952015-06-25 03:08:39 -040055 */
56
/*
 * memcpy_from_pmem - copy data out of persistent memory
 *
 * Reads need no durability machinery; just drop the __pmem annotation
 * (__force silences sparse) and perform a plain memcpy.
 */
static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
{
	memcpy(dst, (void __force const *) src, size);
}
61
/* Undo memremap_pmem(); the devm_* pairing ties the mapping to @dev. */
static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
{
	devm_memunmap(dev, (void __force *) addr);
}
66
67/**
Ross Zwisler18279b42015-08-18 13:55:37 -060068 * arch_has_pmem_api - true if wmb_pmem() ensures durability
Ross Zwisler61031952015-06-25 03:08:39 -040069 *
70 * For a given cpu implementation within an architecture it is possible
71 * that wmb_pmem() resolves to a nop. In the case this returns
72 * false, pmem api users are unable to ensure durability and may want to
73 * fall back to a different data consistency model, or otherwise notify
74 * the user.
75 */
Ross Zwisler61031952015-06-25 03:08:39 -040076static inline bool arch_has_pmem_api(void)
77{
78 return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
79}
80
81/*
82 * These defaults seek to offer decent performance and minimize the
83 * window between i/o completion and writes being durable on media.
84 * However, it is undefined / architecture specific whether
85 * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
86 * making data durable relative to i/o completion.
87 */
Dan Williamse836a252015-08-12 18:42:56 -040088static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
Ross Zwisler61031952015-06-25 03:08:39 -040089 size_t size)
90{
91 memcpy((void __force *) dst, src, size);
92}
93
Ross Zwisler5de490d2015-08-18 13:55:39 -060094static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
95 size_t bytes, struct iov_iter *i)
96{
97 return copy_from_iter_nocache((void __force *)addr, bytes, i);
98}
99
100static inline void default_clear_pmem(void __pmem *addr, size_t size)
101{
102 if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
103 clear_page((void __force *)addr);
104 else
105 memset((void __force *)addr, 0, size);
106}
107
Ross Zwisler61031952015-06-25 03:08:39 -0400108/**
 * memremap_pmem - map physical persistent memory for pmem api
 * @dev: device that manages the lifetime of the mapping (devm-managed)
 * @offset: physical address of persistent memory
 * @size: size of the mapping
112 *
113 * Establish a mapping of the architecture specific memory type expected
114 * by memcpy_to_pmem() and wmb_pmem(). For example, it may be
115 * the case that an uncacheable or writethrough mapping is sufficient,
116 * or a writeback mapping provided memcpy_to_pmem() and
117 * wmb_pmem() arrange for the data to be written through the
118 * cache to persistent media.
119 */
Christoph Hellwig708ab622015-08-10 23:07:08 -0400120static inline void __pmem *memremap_pmem(struct device *dev,
121 resource_size_t offset, unsigned long size)
Ross Zwisler61031952015-06-25 03:08:39 -0400122{
Dan Williamse836a252015-08-12 18:42:56 -0400123#ifdef ARCH_MEMREMAP_PMEM
Christoph Hellwig708ab622015-08-10 23:07:08 -0400124 return (void __pmem *) devm_memremap(dev, offset, size,
125 ARCH_MEMREMAP_PMEM);
Dan Williamse836a252015-08-12 18:42:56 -0400126#else
Christoph Hellwig708ab622015-08-10 23:07:08 -0400127 return (void __pmem *) devm_memremap(dev, offset, size,
128 MEMREMAP_WT);
Dan Williamse836a252015-08-12 18:42:56 -0400129#endif
Ross Zwisler61031952015-06-25 03:08:39 -0400130}
131
132/**
133 * memcpy_to_pmem - copy data to persistent memory
134 * @dst: destination buffer for the copy
135 * @src: source buffer for the copy
136 * @n: length of the copy in bytes
137 *
138 * Perform a memory copy that results in the destination of the copy
139 * being effectively evicted from, or never written to, the processor
140 * cache hierarchy after the copy completes. After memcpy_to_pmem()
141 * data may still reside in cpu or platform buffers, so this operation
142 * must be followed by a wmb_pmem().
143 */
144static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
145{
146 if (arch_has_pmem_api())
147 arch_memcpy_to_pmem(dst, src, n);
148 else
149 default_memcpy_to_pmem(dst, src, n);
150}
151
152/**
153 * wmb_pmem - synchronize writes to persistent memory
154 *
155 * After a series of memcpy_to_pmem() operations this drains data from
156 * cpu write buffers and any platform (memory controller) buffers to
157 * ensure that written data is durable on persistent memory media.
158 */
static inline void wmb_pmem(void)
{
	/* Nothing to drain when the arch provides no pmem api. */
	if (!arch_has_pmem_api())
		return;
	arch_wmb_pmem();
}
Ross Zwisler5de490d2015-08-18 13:55:39 -0600164
165/**
166 * copy_from_iter_pmem - copy data from an iterator to PMEM
167 * @addr: PMEM destination address
168 * @bytes: number of bytes to copy
169 * @i: iterator with source data
170 *
171 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
172 * This function requires explicit ordering with a wmb_pmem() call.
173 */
174static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
175 struct iov_iter *i)
176{
177 if (arch_has_pmem_api())
178 return arch_copy_from_iter_pmem(addr, bytes, i);
179 return default_copy_from_iter_pmem(addr, bytes, i);
180}
181
182/**
183 * clear_pmem - zero a PMEM memory range
184 * @addr: virtual start address
185 * @size: number of bytes to zero
186 *
187 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
188 * This function requires explicit ordering with a wmb_pmem() call.
189 */
190static inline void clear_pmem(void __pmem *addr, size_t size)
191{
192 if (arch_has_pmem_api())
193 arch_clear_pmem(addr, size);
194 else
195 default_clear_pmem(addr, size);
196}
Ross Zwisler61031952015-06-25 03:08:39 -0400197#endif /* __PMEM_H__ */