/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
		.backing_dev_info = &swap_backing_dev_info,
	}
};

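/*
 * Crude, unlocked hit/miss statistics: concurrent updates may race and
 * be lost, which is fine for the informational output of
 * show_swap_cache_info().
 */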
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

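/*
 * Readahead hits seen since the window was last sized; consumed by
 * swapin_nr_pages().  Seeded non-zero, which gives the first faults a
 * modest readahead window before any hit feedback exists.
 */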
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so it never returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}

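/*
 * Wrapper that preloads the radix tree first, so the insertion under
 * tree_lock in __add_to_swap_cache() cannot fail on node allocation
 * (when the gfp mask allows blocking).
 */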
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

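/*
 * Typical caller pattern (sketch, modelled on vmscan's shrink_page_list,
 * not a verbatim excerpt):
 *
 *	if (PageAnon(page) && !PageSwapCache(page))
 *		if (!add_to_swap(page, page_list))
 *			goto activate_locked;
 */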
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list the tail pages are put on if the huge page must be split
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

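	/*
	 * Drain the per-cpu LRU-add caches, then work through the array
	 * in PAGEVEC_SIZE batches: free any swap cache for each page in
	 * a batch, then drop the batch's references in bulk.
	 */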
269 lru_add_drain();
270 while (nr) {
Hugh Dickinsc484d412006-01-06 00:10:55 -0800271 int todo = min(nr, PAGEVEC_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 int i;
273
274 for (i = 0; i < todo; i++)
275 free_swap_cache(pagep[i]);
Mel Gormanb745bc82014-06-04 16:10:22 -0700276 release_pages(pagep, todo, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277 pagep += todo;
278 nr -= todo;
279 }
280}
281
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

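	/*
	 * A hit on a page still carrying PageReadahead means the last
	 * readahead window paid off; feed that back into the heuristic
	 * in swapin_nr_pages().
	 */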
	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

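/*
 * The retry loop below races against other tasks faulting in the same
 * entry: swapcache_prepare() claims the entry (SWAP_HAS_CACHE) and only
 * the winner inserts its page; everyone else re-does the cache lookup.
 */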
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swap_address_space(entry),
					entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

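/*
 * Worked example (illustrative): with page_cluster = 3, max_pages = 8.
 * Five hits in the last interval give pages = 5 + 2 = 7, rounded up to
 * the next power of two = 8, capped at max_pages.  No hits and a
 * non-adjacent offset collapse the window to a single page (subject to
 * the "don't shrink readahead too fast" damper below).
 */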
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << ACCESS_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code.  We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area.  This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
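	/*
	 * Example (illustrative): offset 0x123 with an 8-page window
	 * (mask 7) reads the aligned cluster 0x120..0x127.
	 */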
482
	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
Hugh Dickins46017e92008-02-04 22:28:41 -0800499}