/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <asm/pgtable.h>

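/*
 * Build a single-segment bio targeting @page's slot on the swap device:
 * map the page to its on-disk sector, attach the page as the only
 * bio_vec, and install @end_io as the completion callback.
 * Returns NULL if the bio allocation fails.
 */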
static struct bio *get_swap_bio(gfp_t gfp_flags,
                                struct page *page, bio_end_io_t end_io)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, 1);
        if (bio) {
                bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
                bio->bi_sector <<= PAGE_SHIFT - 9;
                bio->bi_io_vec[0].bv_page = page;
                bio->bi_io_vec[0].bv_len = PAGE_SIZE;
                bio->bi_io_vec[0].bv_offset = 0;
                bio->bi_vcnt = 1;
                bio->bi_idx = 0;
                bio->bi_size = PAGE_SIZE;
                bio->bi_end_io = end_io;
        }
        return bio;
}

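/*
 * Completion handler for swap-out I/O.  On failure the page is
 * re-dirtied so its contents are not lost; in all cases writeback is
 * ended and the bio reference dropped.
 */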
static void end_swap_bio_write(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct page *page = bio->bi_io_vec[0].bv_page;

        if (!uptodate) {
                SetPageError(page);
                /*
                 * We failed to write the page out to swap-space.
                 * Re-dirty the page in order to avoid it being reclaimed.
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
                 * Also clear PG_reclaim so that end_page_writeback() does
                 * not call rotate_reclaimable_page() and move the page to
                 * the tail of the inactive list.
                 */
                set_page_dirty(page);
                printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
                                (unsigned long long)bio->bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
        bio_put(bio);
}

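/*
 * Completion handler for swap-in I/O.  Marks the page up-to-date on
 * success (or sets PageError on failure), then unlocks the page and
 * drops the bio reference.
 */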
void end_swap_bio_read(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct page *page = bio->bi_io_vec[0].bv_page;

        if (!uptodate) {
                SetPageError(page);
                ClearPageUptodate(page);
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
                                (unsigned long long)bio->bi_sector);
        } else {
                SetPageUptodate(page);
        }
        unlock_page(page);
        bio_put(bio);
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct bio *bio;
        int ret = 0, rw = WRITE;

        if (try_to_free_swap(page)) {
                unlock_page(page);
                goto out;
        }
        bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
        if (bio == NULL) {
                set_page_dirty(page);
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
                rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(rw, bio);
out:
        return ret;
}

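/*
 * Read a swap cache page back in from the swap device.  The caller
 * must hold the page lock and the page must not already be up to
 * date; completion is handled asynchronously by end_swap_bio_read().
 */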
int swap_readpage(struct page *page)
{
        struct bio *bio;
        int ret = 0;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageUptodate(page));
        bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
        if (bio == NULL) {
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        count_vm_event(PSWPIN);
        submit_bio(READ, bio);
out:
        return ret;
}