#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' occupies the
 * seven high-order bits of the swp_entry_t and `offset' is right-aligned
 * in the remaining bits.  Although `type' itself needs only five bits,
 * we allow for shmem/tmpfs to shift it all up a further two bits: see
 * swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)	((sizeof(e.val) * 8) - \
			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)	((1UL << SWP_TYPE_SHIFT(e)) - 1)
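
/*
 * For example, on a 64-bit kernel (MAX_SWAPFILES_SHIFT == 5,
 * RADIX_TREE_EXCEPTIONAL_SHIFT == 2) SWP_TYPE_SHIFT(e) is 57, so
 * `type' lives in bits 57-63 and `offset' in bits 0-56.
 */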

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT(ret)) |
			(offset & SWP_OFFSET_MASK(ret));
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format.
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format.
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK(entry);
}

#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
#endif
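
/*
 * Typical usage in a page-table walker (a sketch, not code from this
 * file): with the pte lock held,
 *
 *	pte_t pte = *ptep;
 *
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *		if (!non_swap_entry(entry))
 *			swap_was_used(swp_type(entry), swp_offset(entry));
 *	}
 *
 * where swap_was_used() stands in for whatever the walker does with a
 * real swap slot; non_swap_entry() (defined at the end of this file)
 * filters out the special migration and hwpoison entries.
 */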

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	/* Strip the soft-dirty bit before decoding type/offset */
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

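/*
 * shmem/tmpfs keeps the swap entry of a paged-out page in its radix
 * tree as an "exceptional" entry: the swp_entry_t value shifted up by
 * RADIX_TREE_EXCEPTIONAL_SHIFT and tagged with
 * RADIX_TREE_EXCEPTIONAL_ENTRY, so it can never be mistaken for a
 * struct page pointer.  These two helpers convert between the forms.
 */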
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	unsigned long value;

	value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
	return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

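/*
 * During page migration, each pte mapping the old page is replaced by a
 * migration entry: a non-present pte whose swap type is
 * SWP_MIGRATION_READ or SWP_MIGRATION_WRITE (recording whether the pte
 * was writable) and whose offset holds the page's pfn.  A thread that
 * faults on one waits in migration_entry_wait() until migration
 * completes and the entry has been replaced.
 */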
#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
			page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(p));
	return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}
#define migration_entry_to_page(swp) NULL
static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}

#endif

#ifdef CONFIG_MEMORY_FAILURE

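/* Pages poisoned by uncorrected hardware memory errors, system-wide */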
extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return TestSetPageHWPoison(page);
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}

static inline void num_poisoned_pages_add(long num)
{
	atomic_long_add(num, &num_poisoned_pages);
}

static inline void num_poisoned_pages_sub(long num)
{
	atomic_long_sub(num, &num_poisoned_pages);
}
#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline bool test_set_page_hwpoison(struct page *page)
{
	return false;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

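/*
 * Migration and hwpoison entries reuse swap types at or above
 * MAX_SWAPFILES; non_swap_entry() distinguishes these special entries
 * from entries that refer to a real swap slot.
 */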
#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif

#endif /* _LINUX_SWAPOPS_H */