/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = __set_page_dirty_no_writeback,
        .migratepage    = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
        .name           = "swap",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
        [0 ... MAX_SWAPFILES - 1] = {
                .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
                .i_mmap_writable = ATOMIC_INIT(0),
                .a_ops          = &swap_aops,
                .backing_dev_info = &swap_backing_dev_info,
        }
};
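
/*
 * For orientation, a sketch of how a swap entry maps to its
 * address_space.  The real helper lives in <linux/swap.h>; in kernels
 * with per-type swapper spaces it is, to the best of our knowledge,
 * equivalent to:
 *
 *	#define swap_address_space(entry) \
 *		(&swapper_spaces[swp_type(entry)])
 *
 * i.e. each swap device (swp_type) gets its own radix tree, lock and
 * nrpages counter, which is why total_swapcache_pages() below must
 * sum over all MAX_SWAPFILES spaces.
 */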

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
        int i;
        unsigned long ret = 0;

        for (i = 0; i < MAX_SWAPFILES; i++)
                ret += swapper_spaces[i].nrpages;
        return ret;
}

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
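
/*
 * Example of the output above, with made-up values purely for
 * illustration (the format follows the printk strings exactly):
 *
 *	1024 pages in swap cache
 *	Swap cache stats: add 5000, delete 3976, find 1300/2200
 *	Free swap  = 510000kB
 *	Total swap = 524284kB
 */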

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;
        struct address_space *address_space;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageSwapCache(page));
        VM_BUG_ON(!PageSwapBacked(page));

        page_cache_get(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        error = radix_tree_insert(&address_space->page_tree,
                                        entry.val, page);
        if (likely(!error)) {
                address_space->nrpages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                __inc_zone_page_state(page, NR_SWAPCACHE);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&address_space->tree_lock);

        if (unlikely(error)) {
                /*
                 * Only a context which has set the SWAP_HAS_CACHE flag
                 * calls add_to_swap_cache(), so add_to_swap_cache()
                 * never returns -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                page_cache_release(page);
        }

        return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}
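
/*
 * Illustrative caller sketch (paraphrased, not from this file): a
 * context must own the SWAP_HAS_CACHE reference on the entry before
 * calling add_to_swap_cache(), which is why -EEXIST cannot come back:
 *
 *	if (swapcache_prepare(entry))		// claims SWAP_HAS_CACHE
 *		goto out;			// raced: entry freed or cached
 *	if (add_to_swap_cache(page, entry, GFP_ATOMIC))
 *		swapcache_free(entry, NULL);	// drop SWAP_HAS_CACHE again
 *
 * read_swap_cache_async() below follows exactly this pattern.
 */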

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapCache(page));
        VM_BUG_ON(PageWriteback(page));

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        radix_tree_delete(&address_space->page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        address_space->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __dec_zone_page_state(page, NR_SWAPCACHE);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageUptodate(page));

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page(page))) {
                        swapcache_free(entry, NULL);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache and mark it dirty
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {     /* Success */
                SetPageDirty(page);
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry, NULL);
                return 0;
        }
}
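
/*
 * Hedged sketch of the main caller, vmscan's shrink_page_list()
 * (paraphrased, not copied from mm/vmscan.c): anonymous pages get
 * swap space allocated lazily, at reclaim time:
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page))
 *			goto activate_locked;	// no swap left, keep page
 *	}
 *
 * A return of 1 leaves the page dirty and in the swap cache, so the
 * normal pageout path then writes it out via swap_aops->writepage.
 */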

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * since the caller holds a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        swapcache_free(entry, page);
        page_cache_release(page);
}
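
/*
 * Hedged usage sketch: mm/swapfile.c's try_to_free_swap() (paraphrased)
 * is the typical caller once the last swap reference is gone:
 *
 *	if (PageSwapCache(page) && !PageWriteback(page) &&
 *	    !page_swapcount(page)) {
 *		delete_from_swap_cache(page);
 *		SetPageDirty(page);	// data now lives only in RAM
 *	}
 */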

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}
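
/*
 * The loop above batches in PAGEVEC_SIZE chunks so release_pages()
 * takes the LRU lock once per batch rather than once per page.
 * E.g. with nr = 50 and PAGEVEC_SIZE = 14 (the usual value), the
 * batches are 14, 14, 14 and 8 pages.
 */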

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}
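
/*
 * Hedged sketch of the fault-path usage in mm/memory.c's do_swap_page()
 * (paraphrased): the cheap cache probe comes first, readahead only on
 * a miss, which is also why read_swap_cache_async() below re-checks
 * the cache itself instead of calling lookup_swap_cache() again:
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry,
 *				GFP_HIGHUSER_MOVABLE, vma, address);
 */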

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swap_address_space(entry),
                                        entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet, while
                         * the other end is scheduled away waiting on discard
                         * I/O completion at scan_swap_map().
                         *
                         * In order to avoid turning this transitory state
                         * into a permanent loop around this -EEXIST case
                         * if !CONFIG_PREEMPT and the I/O completion happens
                         * to be waiting on the CPU waitqueue where we are now
                         * busy looping, we just conditionally invoke the
                         * scheduler here, if there are some more important
                         * tasks to run.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __set_page_locked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        swap_readpage(new_page);
                        return new_page;
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry, NULL);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}
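
/*
 * Summary of the retry loop above, per swapcache_prepare() result
 * (informal, for orientation only):
 *
 *	0	we own SWAP_HAS_CACHE: insert the page, start the read
 *	-EEXIST	someone else is bringing the page in (or discard I/O is
 *		still pending): cond_resched() and retry the loop
 *	other	the entry was freed meanwhile: give up and return NULL,
 *		unless the cache probe at the top already found a page
 */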

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long offset = swp_offset(entry);
        unsigned long start_offset, end_offset;
        unsigned long mask = is_swap_fast(entry) ? 0 : (1UL << page_cluster) - 1;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;

        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        continue;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
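
/*
 * Worked example of the cluster arithmetic above (illustrative values):
 * with page_cluster = 3 and a non-fast device, mask = (1UL << 3) - 1 = 7.
 * For offset 0x12, start_offset = 0x12 & ~7 = 0x10 and
 * end_offset = 0x12 | 7 = 0x17, so the loop queues reads for slots
 * 0x10..0x17 and the final call returns the page for the originally
 * requested slot 0x12.
 */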