/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;

	rcu_read_lock();
	for (i = 0; i < MAX_SWAPFILES; i++) {
		/*
		 * The corresponding entries in nr_swapper_spaces and
		 * swapper_spaces are reused only after at least one RCU
		 * grace period, so within this read-side critical section
		 * they cannot belong to different swap devices.
		 */
		nr = nr_swapper_spaces[i];
		spaces = rcu_dereference(swapper_spaces[i]);
		if (!nr || !spaces)
			continue;
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
	}
	rcu_read_unlock();
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

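/*
 * The output shape of the printks above, with purely illustrative
 * numbers (real values depend entirely on the workload):
 *
 *	1024 pages in swap cache
 *	Swap cache stats: add 8192, delete 7168, find 4096/6144
 *	Free swap = 1048064kB
 *	Total swap = 2097148kB
 */
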
/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	get_page(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
				  swp_offset(entry), page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_node_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so add_to_swap_cache()
		 * never returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		put_page(page);
	}

	return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

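/*
 * A minimal usage sketch, mirroring the call in add_to_swap() below:
 * the caller holds the page lock, has already marked the swap_map
 * entry with SWAP_HAS_CACHE, and passes conservative gfp flags so the
 * radix-tree preload cannot dip into emergency reserves:
 *
 *	err = add_to_swap_cache(page, entry,
 *			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
 *	if (err)	/* -ENOMEM; -EEXIST cannot happen, see above */
 *		swapcache_free(entry);
 */
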
/*
 * This must be called only on pages that have been verified to be in
 * the swap cache, with the address space's tree_lock held (see the
 * locked wrapper delete_from_swap_cache() below).
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, swp_offset(entry));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to which the tail pages are added if the page is a THP
 *	  and has to be split
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 on failure.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry)) {
		swapcache_free(entry);
		return 0;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}

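/*
 * For context, the main caller is vmscan's shrink_page_list(), which
 * handles anonymous pages roughly as follows (a simplified sketch, not
 * the exact reclaim code):
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page, page_list))
 *			goto activate_locked;
 *	}
 *
 * i.e. a 0 return sends the page back to the active list instead of
 * being written out.
 */
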
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page onto the free list;
 * the caller holds a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

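/*
 * This batched variant is intended for callers that already hold an
 * array of pages to release at once; the mmu_gather TLB teardown path
 * is the typical user, feeding it the pages collected while unmapping.
 */
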
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

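/*
 * Note the feedback loop above: swapin_readahead() marks every
 * speculatively read page with PG_readahead, each hit observed here
 * bumps swapin_readahead_hits, and swapin_nr_pages() later consumes
 * that count to grow or shrink the next readahead window.
 */
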
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in a later part of the
		 * code, or else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

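/*
 * Summary of how the retry loop above resolves the possible races:
 *  - page already in the swap cache: return it (found_page);
 *  - swapcache_prepare() returns -EEXIST: another context owns
 *    SWAP_HAS_CACHE but has not inserted its page yet, so reschedule
 *    and retry;
 *  - any other swapcache_prepare() error: the entry was freed under
 *    us, give up and return NULL;
 *  - otherwise this context owns the cache slot and returns the newly
 *    inserted, locked page with *new_page_allocated set.
 */
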
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

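/*
 * A worked example of the heuristic (illustrative values): with
 * page_cluster = 3, max_pages = 1 << 3 = 8.  If 5 readahead hits were
 * recorded since the last fault, pages = 5 + 2 = 7, rounded up to the
 * next power of two = 8, already within max_pages.  With no hits
 * (pages == 2), the window collapses to a single page unless this
 * fault is adjacent to prev_offset; and whatever the result, it never
 * drops below half of the previous window (last_readahead_pages / 2).
 */
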
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code.  We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area.  This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}

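/*
 * Window example (hypothetical offsets): for a fault at swap offset 21
 * with swapin_nr_pages() returning 8, mask = 7, so the loop reads the
 * aligned cluster of offsets 16..23.  Every page except the faulting
 * offset 21 is tagged with SetPageReadahead(), which is what later
 * feeds the hit accounting in lookup_swap_cache().
 */
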
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = vzalloc(sizeof(struct address_space) * nr);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
		spin_lock_init(&space->tree_lock);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}

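/*
 * Sizing sketch (assuming SWAP_ADDRESS_SPACE_PAGES is 2^14, its value
 * in <linux/swap.h> at this point): a 1 GiB swap device with 4 KiB
 * pages spans 262144 slots, so nr = DIV_ROUND_UP(262144, 16384) = 16
 * address spaces, each covering a 64 MiB stripe of the device.  This
 * split is what lets different offsets contend on different tree_locks.
 */
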
void exit_swap_address_space(unsigned int type)
{
	struct address_space *spaces;

	spaces = swapper_spaces[type];
	nr_swapper_spaces[type] = 0;
	rcu_assign_pointer(swapper_spaces[type], NULL);
	synchronize_rcu();
	kvfree(spaces);
}
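
/*
 * The synchronize_rcu() above pairs with the rcu_read_lock() in
 * total_swapcache_pages(): once it returns, no reader can still hold a
 * reference obtained via rcu_dereference(swapper_spaces[type]), so the
 * spaces array is safe to free.
 */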