/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

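/*
 * Illustrative sketch (not in the original file): because all swap pages
 * live in this one address_space, a swp_entry_t's raw value serves
 * directly as the radix-tree index, e.g.
 *
 *	swp_entry_t entry = { .val = page_private(page) };
 *	struct page *p = find_get_page(&swapper_space, entry.val);
 *
 * which is exactly how lookup_swap_cache() below resolves an entry.
 */
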
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

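/*
 * For reference, show_swap_cache_info() emits output of the form below
 * (numbers purely illustrative):
 *
 *	1024 pages in swap cache
 *	Swap cache stats: add 5000, delete 3976, find 1300/2600
 *	Free swap  = 1048572kB
 *	Total swap = 2097148kB
 */
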
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		page_cache_get(page);
		SetPageSwapCache(page);
		set_page_private(page, entry.val);

		spin_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (likely(!error)) {
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		spin_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();

		if (unlikely(error)) {
			set_page_private(page, 0UL);
			ClearPageSwapCache(page);
			page_cache_release(page);
		}
	}
	return error;
}

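/*
 * Usage sketch (illustrative only): a caller that holds the page lock,
 * has set PageSwapBacked, and holds a reference on the swap entry would
 * do something like
 *
 *	err = add_to_swap_cache(page, entry, GFP_ATOMIC);
 *	if (err)
 *		swapcache_free(entry, NULL);
 *
 * where the swapcache_free() releases the entry that never got cached.
 */
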
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache, with
 * swapper_space.tree_lock held by the caller.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator. __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = add_to_swap_cache(page, entry,
				__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageDirty(page);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			swapcache_free(entry, NULL);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swapcache_free(entry, NULL);
			return 0;
		}
	}
}

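/*
 * Sketch of the typical caller, vmscan's shrink_page_list() (simplified
 * and illustrative, not copied from vmscan.c):
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page))
 *			goto activate_locked;
 *	}
 *
 * On success the page is dirty and in the swap cache, so the generic
 * pageout path can write it out via swap_writepage().
 */
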
174/*
175 * This must be called only on pages that have
176 * been verified to be in the swap cache and locked.
177 * It will never put the page into the free list,
178 * the caller has a reference on the page.
179 */
180void delete_from_swap_cache(struct page *page)
181{
182 swp_entry_t entry;
183
Hugh Dickins4c21e2f2005-10-29 18:16:40 -0700184 entry.val = page_private(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185
Nick Piggin19fd6232008-07-25 19:45:32 -0700186 spin_lock_irq(&swapper_space.tree_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187 __delete_from_swap_cache(page);
Nick Piggin19fd6232008-07-25 19:45:32 -0700188 spin_unlock_irq(&swapper_space.tree_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189
KAMEZAWA Hiroyukicb4b86b2009-06-16 15:32:52 -0700190 swapcache_free(entry, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191 page_cache_release(page);
192}
193
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

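/*
 * Illustrative note: a batch-teardown caller such as the mmu_gather code
 * can hand over its whole pending array in one call (sketch, assuming a
 * pages[]/nr pair like struct mmu_gather carries):
 *
 *	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 *
 * Working in PAGEVEC_SIZE chunks keeps each release_pages() pass, and
 * hence each hold of the zone's lru_lock, short.
 */
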
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the page table
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

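/*
 * Typical use, a simplified sketch of the do_swap_page()-style fault
 * path (illustrative, not copied from memory.c):
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 *
 * so a swap-cache hit is counted before any disk read is issued.
 */
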
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST)	/* raced with another swapin; retry */
			continue;
		if (err)		/* swap entry is obsolete? */
			break;

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-EEXIST) if there is already a page associated
		 * with this entry in the swap cache: added by a racing
		 * read_swap_cache_async, or add_to_swap or shmem_writepage
		 * re-using the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
		if (likely(!err)) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
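
/*
 * Readahead arithmetic, for illustration: with the default page_cluster
 * of 3, valid_swaphandles() describes an aligned block of up to
 * 1 << 3 = 8 entries, so a fault on swap offset 13 would read around
 * offsets 8..15 (trimmed to entries actually in use) before finally
 * returning the page for the faulting entry itself.
 */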