blob: 34f02923744c921fa2d990ec68f220a698049362 [file] [log] [blame]
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */
12
13#include <linux/mm.h>
14#include <linux/kernel_stat.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090015#include <linux/gfp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/pagemap.h>
17#include <linux/swap.h>
18#include <linux/bio.h>
19#include <linux/swapops.h>
20#include <linux/writeback.h>
Dan Magenheimer38b5faf2012-04-09 17:08:06 -060021#include <linux/frontswap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <asm/pgtable.h>
23
Hugh Dickinsf29ad6a2009-12-14 17:58:40 -080024static struct bio *get_swap_bio(gfp_t gfp_flags,
Linus Torvalds1da177e2005-04-16 15:20:36 -070025 struct page *page, bio_end_io_t end_io)
26{
27 struct bio *bio;
28
29 bio = bio_alloc(gfp_flags, 1);
30 if (bio) {
Lee Schermerhornd4906e12009-12-14 17:58:49 -080031 bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
Hugh Dickinsf29ad6a2009-12-14 17:58:40 -080032 bio->bi_sector <<= PAGE_SHIFT - 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -070033 bio->bi_io_vec[0].bv_page = page;
34 bio->bi_io_vec[0].bv_len = PAGE_SIZE;
35 bio->bi_io_vec[0].bv_offset = 0;
36 bio->bi_vcnt = 1;
37 bio->bi_idx = 0;
38 bio->bi_size = PAGE_SIZE;
39 bio->bi_end_io = end_io;
40 }
41 return bio;
42}
43
/*
 * Completion handler for a swap-out bio (may run in interrupt context).
 * On success just end writeback; on failure re-dirty the page so its
 * data is not lost to reclaim, and log the failing device/sector.
 */
static void end_swap_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		ClearPageReclaim(page);
	}
	/* matches the set_page_writeback() done by swap_writepage() */
	end_page_writeback(page);
	bio_put(bio);
}
69
NeilBrown6712ecf2007-09-27 12:47:43 +020070void end_swap_bio_read(struct bio *bio, int err)
Linus Torvalds1da177e2005-04-16 15:20:36 -070071{
72 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
73 struct page *page = bio->bi_io_vec[0].bv_page;
74
Linus Torvalds1da177e2005-04-16 15:20:36 -070075 if (!uptodate) {
76 SetPageError(page);
77 ClearPageUptodate(page);
Peter Zijlstra6ddab3b2006-09-25 23:31:26 -070078 printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
79 imajor(bio->bi_bdev->bd_inode),
80 iminor(bio->bi_bdev->bd_inode),
81 (unsigned long long)bio->bi_sector);
Linus Torvalds1da177e2005-04-16 15:20:36 -070082 } else {
83 SetPageUptodate(page);
84 }
85 unlock_page(page);
86 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -070087}
88
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 *
 * Called with @page locked; the lock is dropped on every path.
 * Returns 0 on success (including the "write avoided" paths) or
 * -ENOMEM if a bio could not be allocated.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret = 0, rw = WRITE;

	/* Stale swap-cache page with no other users: drop it, no I/O needed. */
	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	/*
	 * frontswap_store() returning 0 means the page was captured by a
	 * frontswap backend; go through the writeback begin/end motions so
	 * callers see a completed write without touching the block device.
	 */
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
	if (bio == NULL) {
		/* Keep the page dirty so reclaim retries it later. */
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		rw |= REQ_SYNC;
	count_vm_event(PSWPOUT);
	/*
	 * Mark writeback and unlock before submitting: completion
	 * (end_swap_bio_write) may run at any point after submit_bio().
	 */
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(rw, bio);
out:
	return ret;
}
124
Minchan Kimaca8bf32009-06-16 15:33:02 -0700125int swap_readpage(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126{
127 struct bio *bio;
128 int ret = 0;
129
Hugh Dickins51726b12009-01-06 14:39:25 -0800130 VM_BUG_ON(!PageLocked(page));
131 VM_BUG_ON(PageUptodate(page));
Konrad Rzeszutek Wilk165c8ae2012-05-15 11:32:15 -0400132 if (frontswap_load(page) == 0) {
Dan Magenheimer38b5faf2012-04-09 17:08:06 -0600133 SetPageUptodate(page);
134 unlock_page(page);
135 goto out;
136 }
Hugh Dickinsf29ad6a2009-12-14 17:58:40 -0800137 bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138 if (bio == NULL) {
139 unlock_page(page);
140 ret = -ENOMEM;
141 goto out;
142 }
Christoph Lameterf8891e52006-06-30 01:55:45 -0700143 count_vm_event(PSWPIN);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144 submit_bio(READ, bio);
145out:
146 return ret;
147}