/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
        [0 ... MAX_SWAPFILES - 1] = {
                .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
                .i_mmap_writable = ATOMIC_INIT(0),
                .a_ops          = &swap_aops,
        }
};

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

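/*
 * Sum the page counts of all per-swapfile address_spaces: the swap cache
 * is split across swapper_spaces[], one entry per swap type.
 */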
unsigned long total_swapcache_pages(void)
{
        int i;
        unsigned long ret = 0;

        for (i = 0; i < MAX_SWAPFILES; i++)
                ret += swapper_spaces[i].nrpages;
        return ret;
}

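/*
 * Counts readahead pages that were actually faulted in (bumped by
 * lookup_swap_cache()); consumed by swapin_nr_pages() to size the next
 * readahead window.
 */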
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        get_page(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        error = radix_tree_insert(&address_space->page_tree,
                                  entry.val, page);
        if (likely(!error)) {
                address_space->nrpages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&address_space->tree_lock);

        if (unlikely(error)) {
                /*
                 * Only the context which has set the SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache(),
                 * so add_to_swap_cache() doesn't return -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                put_page(page);
        }

        return error;
}

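/*
 * Like __add_to_swap_cache(), but preloads radix-tree nodes with @gfp_mask
 * first, so the insertion under tree_lock does not have to allocate.
 */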
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        radix_tree_delete(&address_space->page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        address_space->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to which tail pages are added if the page must be split
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (mem_cgroup_try_charge_swap(page, entry)) {
                swapcache_free(entry);
                return 0;
        }

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator.  __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
                return 0;
        }
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page onto the free list;
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        swapcache_free(entry);
        put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), entry.val);

        if (page) {
                INC_CACHE_INFO(find_success);
                if (TestClearPageReadahead(page))
                        atomic_inc(&swapin_readahead_hits);
        }

        INC_CACHE_INFO(find_total);
        return page;
}

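/*
 * Look up @entry in the swap cache; if it is not present, allocate a new
 * page, add it to the swap cache and return it locked.  The caller is
 * responsible for starting the actual swap I/O; *new_page_allocated tells
 * it whether a read needs to be issued.
 */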
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct page *found_page, *new_page = NULL;
        struct address_space *swapper_space = swap_address_space(entry);
        int err;
        *new_page_allocated = false;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet, while
                         * the other end is scheduled away waiting on discard
                         * I/O completion at scan_swap_map().
                         *
                         * In order to avoid turning this transitory state
                         * into a permanent loop around this -EEXIST case
                         * if !CONFIG_PREEMPT and the I/O completion happens
                         * to be waiting on the CPU waitqueue where we are now
                         * busy looping, we just conditionally invoke the
                         * scheduler here, if there are some more important
                         * tasks to run.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __SetPageLocked(new_page);
                __SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        *new_page_allocated = true;
                        return new_page;
                }
                radix_tree_preload_end();
                __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                put_page(new_page);
        return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage);

        return retpage;
}

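/*
 * Decide how many pages to read around @offset, based on how many of the
 * previous readahead pages turned out to be useful.  Never exceeds
 * 1 << page_cluster, and shrinks by at most half from one call to the next.
 */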
static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int pages, max_pages, last_ra;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
                prev_offset = offset;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = atomic_read(&last_readahead_pages) / 2;
        if (pages < last_ra)
                pages = last_ra;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct blk_plug plug;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        continue;
                if (offset != entry_offset)
                        SetPageReadahead(page);
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}