// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

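/*
 * Per-VMA readahead state is packed into the single atomic_long_t
 * vma->swap_readahead_info by the macros below: the page-aligned fault
 * address in the high bits, the readahead window size in the middle,
 * and the recent hit count in the lowest bits.  For example, with 4K
 * pages (PAGE_SHIFT == 12), SWAP_RA_WIN_SHIFT is 6, so bits 0-5 hold
 * the hit count and bits 6-11 hold the window.
 */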
#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)			\
	(((addr) & PAGE_MASK) |				\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;

	rcu_read_lock();
	for (i = 0; i < MAX_SWAPFILES; i++) {
		/*
		 * The corresponding entries in nr_swapper_spaces and
		 * swapper_spaces will be reused only after at least
		 * one grace period.  So it is impossible for them
		 * to belong to different usages.
		 */
		nr = nr_swapper_spaces[i];
		spaces = rcu_dereference(swapper_spaces[i]);
		if (!nr || !spaces)
			continue;
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
	}
	rcu_read_unlock();
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error, i, nr = hpage_nr_pages(page);
	struct address_space *address_space;
	pgoff_t idx = swp_offset(entry);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	address_space = swap_address_space(entry);
	xa_lock_irq(&address_space->i_pages);
	for (i = 0; i < nr; i++) {
		set_page_private(page + i, entry.val + i);
		error = radix_tree_insert(&address_space->i_pages,
					  idx + i, page + i);
		if (unlikely(error))
			break;
	}
	if (likely(!error)) {
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
	} else {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page + i, 0UL);
		while (i--) {
			radix_tree_delete(&address_space->i_pages, idx + i);
			set_page_private(page + i, 0UL);
		}
		ClearPageSwapCache(page);
		page_ref_sub(page, nr);
	}
	xa_unlock_irq(&address_space->i_pages);

	return error;
}


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	struct address_space *address_space;
	int i, nr = hpage_nr_pages(page);
	swp_entry_t entry;
	pgoff_t idx;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	idx = swp_offset(entry);
	for (i = 0; i < nr; i++) {
		radix_tree_delete(&address_space->i_pages, idx + i);
		set_page_private(page + i, 0);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
	/* -ENOMEM radix-tree allocation failure */
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty.  A special case is an MADV_FREE page: its pte could have the
	 * dirty bit cleared while the page's SwapBacked bit is still set,
	 * because clearing the dirty bit and the SwapBacked bit is not done
	 * under a lock.  For such a page, unmap will not set the dirty bit,
	 * so page reclaim will not write the page out.  This can cause data
	 * corruption when the page is swapped in later.  Always setting the
	 * dirty bit for the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

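/*
 * VMA-based readahead is used only when it is enabled via sysfs and no
 * active swap device sits on rotational media: nr_rotate_swap counts
 * rotational swap devices, for which the physically clustered readahead
 * below is the better fit.
 */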
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swap_off, when swap_slot_cache is disabled,
		 * we have to handle the race between putting a
		 * swap entry in the swap cache and marking the swap slot
		 * as SWAP_HAS_CACHE.  That's done in a later part of the
		 * code, or else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		put_swap_page(new_page, entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		prev_offset = offset;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

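/*
 * Each swap device's cache is split into multiple address_spaces, one
 * per SWAP_ADDRESS_SPACE_PAGES worth of swap slots, so that lookups and
 * insertions spread across several i_pages locks instead of contending
 * on a single one.
 */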
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->i_pages, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	struct address_space *spaces;

	spaces = swapper_spaces[type];
	nr_swapper_spaces[type] = 0;
	rcu_assign_pointer(swapper_spaces[type], NULL);
	synchronize_rcu();
	kvfree(spaces);
}

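/*
 * Clamp the readahead PFN window [lpfn, rpfn) so that it stays inside
 * both the VMA and the PMD-sized page table that contains the faulting
 * address, since only that page table is walked by the caller.
 */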
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
	entry = pte_to_swp_entry(*pte);
	if ((unlikely(non_swap_entry(entry)))) {
		pte_unmap(orig_pte);
		return;
	}

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
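	/*
	 * On 64-bit, page tables are never in highmem, so a pointer into
	 * the mapped PTEs stays valid after pte_unmap(); on 32-bit the
	 * PTEs are presumably copied out to the on-stack buffer because
	 * the temporary kmap of a highmem page table is dropped by
	 * pte_unmap() below.
	 */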
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {0,};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead.  Depending on the
 * configuration, it reads ahead blocks either cluster-based (i.e. based
 * on physical disk layout) or VMA-based (i.e. based on the virtual
 * addresses around the faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

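/*
 * The knob below is exposed as /sys/kernel/mm/swap/vma_ra_enabled (the
 * "swap" kobject is created under mm_kobj); writing "true"/"1" or
 * "false"/"0" toggles VMA-based readahead at runtime.
 */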
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		enable_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		enable_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif