blob: 1499e1c34f4350fceaac92c7c69cc8a05607fc90 [file] [log] [blame]
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <asm/pgtable.h>
/*
 * We don't need to see swap errors more than once every 1 second to know
 * that a problem is occurring.
 */
#define SWAP_ERROR_LOG_RATE_MS 1000

Hugh Dickinsf29ad6a2009-12-14 17:58:40 -080032static struct bio *get_swap_bio(gfp_t gfp_flags,
Linus Torvalds1da177e2005-04-16 15:20:36 -070033 struct page *page, bio_end_io_t end_io)
34{
35 struct bio *bio;
36
37 bio = bio_alloc(gfp_flags, 1);
38 if (bio) {
Lee Schermerhornd4906e12009-12-14 17:58:49 -080039 bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
Hugh Dickinsf29ad6a2009-12-14 17:58:40 -080040 bio->bi_sector <<= PAGE_SHIFT - 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -070041 bio->bi_io_vec[0].bv_page = page;
42 bio->bi_io_vec[0].bv_len = PAGE_SIZE;
43 bio->bi_io_vec[0].bv_offset = 0;
44 bio->bi_vcnt = 1;
45 bio->bi_idx = 0;
46 bio->bi_size = PAGE_SIZE;
47 bio->bi_end_io = end_io;
48 }
49 return bio;
50}
51
NeilBrown6712ecf2007-09-27 12:47:43 +020052static void end_swap_bio_write(struct bio *bio, int err)
Linus Torvalds1da177e2005-04-16 15:20:36 -070053{
54 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
55 struct page *page = bio->bi_io_vec[0].bv_page;
Olav Haugan76111582013-11-14 09:14:11 -080056 static unsigned long swap_error_rs_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -070057
Peter Zijlstra6ddab3b2006-09-25 23:31:26 -070058 if (!uptodate) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070059 SetPageError(page);
Peter Zijlstra6ddab3b2006-09-25 23:31:26 -070060 /*
61 * We failed to write the page out to swap-space.
62 * Re-dirty the page in order to avoid it being reclaimed.
63 * Also print a dire warning that things will go BAD (tm)
64 * very quickly.
65 *
66 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
67 */
68 set_page_dirty(page);
Olav Haugan76111582013-11-14 09:14:11 -080069 if (printk_timed_ratelimit(&swap_error_rs_time,
70 SWAP_ERROR_LOG_RATE_MS))
71 printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
Peter Zijlstra6ddab3b2006-09-25 23:31:26 -070072 imajor(bio->bi_bdev->bd_inode),
73 iminor(bio->bi_bdev->bd_inode),
74 (unsigned long long)bio->bi_sector);
75 ClearPageReclaim(page);
76 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070077 end_page_writeback(page);
78 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079}
80
NeilBrown6712ecf2007-09-27 12:47:43 +020081void end_swap_bio_read(struct bio *bio, int err)
Linus Torvalds1da177e2005-04-16 15:20:36 -070082{
83 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
84 struct page *page = bio->bi_io_vec[0].bv_page;
85
Linus Torvalds1da177e2005-04-16 15:20:36 -070086 if (!uptodate) {
87 SetPageError(page);
88 ClearPageUptodate(page);
Peter Zijlstra6ddab3b2006-09-25 23:31:26 -070089 printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
90 imajor(bio->bi_bdev->bd_inode),
91 iminor(bio->bi_bdev->bd_inode),
92 (unsigned long long)bio->bi_sector);
Minchan Kim9cbf01d2013-07-03 15:01:24 -070093 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070094 }
Minchan Kim9cbf01d2013-07-03 15:01:24 -070095
96 SetPageUptodate(page);
97
98 /*
99 * There is no guarantee that the page is in swap cache - the software
100 * suspend code (at least) uses end_swap_bio_read() against a non-
101 * swapcache page. So we must check PG_swapcache before proceeding with
102 * this optimization.
103 */
104 if (likely(PageSwapCache(page))) {
105 struct swap_info_struct *sis;
106
107 sis = page_swap_info(page);
108 if (sis->flags & SWP_BLKDEV) {
109 /*
110 * The swap subsystem performs lazy swap slot freeing,
111 * expecting that the page will be swapped out again.
112 * So we can avoid an unnecessary write if the page
113 * isn't redirtied.
114 * This is good for real swap storage because we can
115 * reduce unnecessary I/O and enhance wear-leveling
116 * if an SSD is used as the as swap device.
117 * But if in-memory swap device (eg zram) is used,
118 * this causes a duplicated copy between uncompressed
119 * data in VM-owned memory and compressed data in
120 * zram-owned memory. So let's free zram-owned memory
121 * and make the VM-owned decompressed page *dirty*,
122 * so the page should be swapped out somewhere again if
123 * we again wish to reclaim it.
124 */
125 struct gendisk *disk = sis->bdev->bd_disk;
126 if (disk->fops->swap_slot_free_notify) {
127 swp_entry_t entry;
128 unsigned long offset;
129
130 entry.val = page_private(page);
131 offset = swp_offset(entry);
132
133 SetPageDirty(page);
134 disk->fops->swap_slot_free_notify(sis->bdev,
135 offset);
136 }
137 }
138 }
139
140out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141 unlock_page(page);
142 bio_put(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143}
144
145/*
146 * We may have stale swap cache pages in memory: notice
147 * them here and get rid of the unnecessary final write.
148 */
149int swap_writepage(struct page *page, struct writeback_control *wbc)
150{
151 struct bio *bio;
152 int ret = 0, rw = WRITE;
153
Hugh Dickinsa2c43ee2009-01-06 14:39:36 -0800154 if (try_to_free_swap(page)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155 unlock_page(page);
156 goto out;
157 }
Hugh Dickinsf29ad6a2009-12-14 17:58:40 -0800158 bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159 if (bio == NULL) {
160 set_page_dirty(page);
161 unlock_page(page);
162 ret = -ENOMEM;
163 goto out;
164 }
165 if (wbc->sync_mode == WB_SYNC_ALL)
Jens Axboe721a9602011-03-09 11:56:30 +0100166 rw |= REQ_SYNC;
Christoph Lameterf8891e52006-06-30 01:55:45 -0700167 count_vm_event(PSWPOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168 set_page_writeback(page);
169 unlock_page(page);
170 submit_bio(rw, bio);
171out:
172 return ret;
173}
174
Minchan Kimaca8bf32009-06-16 15:33:02 -0700175int swap_readpage(struct page *page)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
177 struct bio *bio;
178 int ret = 0;
179
Hugh Dickins51726b12009-01-06 14:39:25 -0800180 VM_BUG_ON(!PageLocked(page));
181 VM_BUG_ON(PageUptodate(page));
Hugh Dickinsf29ad6a2009-12-14 17:58:40 -0800182 bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183 if (bio == NULL) {
184 unlock_page(page);
185 ret = -ENOMEM;
186 goto out;
187 }
Christoph Lameterf8891e52006-06-30 01:55:45 -0700188 count_vm_event(PSWPIN);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 submit_bio(READ, bio);
190out:
191 return ret;
192}