/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - \
			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)
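/*
 * Worked example (a sketch, not part of the ABI): on a 64-bit kernel with
 * MAX_SWAPFILES_SHIFT == 5 and RADIX_TREE_EXCEPTIONAL_SHIFT == 2,
 * SWP_TYPE_SHIFT(e) == 64 - (5 + 2) == 57, so:
 *
 *	SWP_OFFSET_MASK(e) == (1UL << 57) - 1	(offset in bits 0..56)
 *	type lives in bits 57..63		(the "seven high-order bits")
 */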

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
			(offset & SWP_OFFSET_MASK(ret));
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK(entry);
}
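/*
 * Round-trip illustration (assuming the offset fits below the
 * SWP_TYPE_SHIFT boundary):
 *
 *	swp_entry_t e = swp_entry(2, 0x1234);
 *
 *	swp_type(e)   == 2
 *	swp_offset(e) == 0x1234
 */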

#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
#endif

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
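/*
 * Typical caller pattern (a sketch, error handling omitted): page-table
 * walkers classify a non-present pte before decoding it, e.g.
 *
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *		if (non_swap_entry(entry))
 *			... migration/device/hwpoison entry, see below ...
 *	}
 *
 * Soft-dirty is cleared first above so that __pte_to_swp_entry() sees
 * only the architecture's swap pte layout.
 */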

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	unsigned long value;

	value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
	return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
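/*
 * Note on the encoding: shifting entry.val left by
 * RADIX_TREE_EXCEPTIONAL_SHIFT (2) and setting bit 0
 * (RADIX_TREE_EXCEPTIONAL_ENTRY) is lossless, because SWP_TYPE_SHIFT()
 * reserved those two bits up front.  Hence for any swap entry e:
 *
 *	radix_to_swp_entry(swp_to_radix_entry(e)).val == e.val
 */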

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
			 page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}

vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp);
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
	return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
		     unsigned long addr,
		     swp_entry_t entry,
		     unsigned int flags,
		     pmd_t *pmdp)
{
	return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */
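/*
 * Fault-path sketch (illustrative; the vmf fields are from struct
 * vm_fault and this is not the only possible call site):
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_device_private_entry(entry))
 *		ret = device_private_entry_fault(vmf->vma, vmf->address,
 *						 entry, vmf->flags, vmf->pmd);
 *
 * giving the owning device driver a chance to migrate the page back to
 * system memory.
 */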

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(compound_head(page)));

	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(compound_head(p)));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
	return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					 unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif
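/*
 * Fault-path sketch (illustrative): a task faulting on a migration
 * entry just waits for the migration to complete and retries:
 *
 *	entry = pte_to_swp_entry(pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(mm, pmd, address);
 *		return 0;	(the fault is then retried)
 *	}
 */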

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif
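/*
 * PMD-level sketch (illustrative), mirroring the pte-level pattern
 * above at THP granularity:
 *
 *	if (is_pmd_migration_entry(*pmd))
 *		pmd_migration_entry_wait(mm, pmd);
 */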

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif
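/*
 * Sketch (illustrative): memory-failure handling replaces the ptes of a
 * poisoned page with a hwpoison entry, so a later fault becomes a
 * SIGBUS rather than a silent access to bad memory:
 *
 *	entry = pte_to_swp_entry(pte);
 *	if (is_hwpoison_entry(entry))
 *		return VM_FAULT_HWPOISON;
 */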

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif
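/*
 * Why one comparison suffices: migration, hwpoison and device private
 * entries all use type values at or above MAX_SWAPFILES (see
 * SWP_MIGRATION_*, SWP_HWPOISON and SWP_DEVICE_* in <linux/swap.h>),
 * so any type >= MAX_SWAPFILES is by construction not a real swapfile.
 */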

#endif /* _LINUX_SWAPOPS_H */