/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
		.backing_dev_info = &swap_backing_dev_info,
	}
};

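/*
 * Note: swap_address_space() (defined in <linux/swap.h>) maps a swap
 * entry to its per-type address_space, roughly:
 *
 *	#define swap_address_space(entry) \
 *		(&swapper_spaces[swp_type(entry)])
 *
 * so each swapfile gets its own radix tree and tree_lock, reducing
 * contention when several swap devices are active.
 */
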
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

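/* Sum the swap-cache page counts across all per-type address spaces. */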
unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

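/*
 * Count of recent swap-cache lookups that hit a readahead page; this
 * feeds the swapin_nr_pages() heuristic below.  The initial value of 4
 * simply seeds the heuristic with a moderate readahead window.
 */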
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}

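/*
 * Like __add_to_swap_cache(), but preloads the radix tree first so the
 * node allocation can honour gfp_mask while we are still allowed to sleep.
 */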
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list the split-out tail pages are queued on if a huge page is split
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}

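/*
 * Sketch of the main caller, reclaim in mm/vmscan.c (abridged here for
 * illustration; see shrink_page_list() for the real logic):
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!(sc->gfp_mask & __GFP_IO))
 *			goto keep_locked;
 *		if (!add_to_swap(page, page_list))
 *			goto activate_locked;
 *	}
 */
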
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, false);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swap_address_space(entry),
					entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << ACCESS_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
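
/*
 * Worked example of the heuristic above (illustrative numbers, assuming
 * page_cluster == 3, so max_pages == 8):
 *
 *  - 5 readahead hits since the last fault: pages = 5 + 2 = 7, rounded
 *    up to the next power of two -> 8, within max_pages -> read 8 pages.
 *  - no hits and a non-adjacent offset: pages = 1, but if the previous
 *    round read 8 pages, last_ra = 8 / 2 = 4 keeps the window at 4, so
 *    readahead decays gradually instead of collapsing at once.
 */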

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * up to (1 << page_cluster) entries in the swap area; the exact window
 * size comes from swapin_nr_pages(). This method is chosen because it
 * doesn't cost us any seek time.  We also make sure to queue the
 * 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
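
/*
 * Worked example of the readahead window (illustrative): with a fault
 * on swap offset 17 and swapin_nr_pages() returning 8, mask == 7, so
 * start_offset == 16 and end_offset == 23; offsets 16..23 are queued,
 * every page except 17 is tagged PageReadahead, and the final
 * read_swap_cache_async() returns the target page at offset 17 (by
 * then usually already in the swap cache).
 */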