/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <asm/pgtable.h>

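/*
 * Allocate and initialise a single-page bio for swap I/O on @page.
 * map_swap_page() resolves the page's swap entry to an offset on the
 * backing block device; the shift converts page-sized units into
 * 512-byte sectors.  Returns NULL if the bio cannot be allocated.
 */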
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_io_vec[0].bv_page = page;
		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
		bio->bi_io_vec[0].bv_offset = 0;
		bio->bi_vcnt = 1;
		bio->bi_idx = 0;
		bio->bi_size = PAGE_SIZE;
		bio->bi_end_io = end_io;
	}
	return bio;
}

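/*
 * Completion handler for swap-out bios.  On I/O failure the page is
 * re-dirtied so its contents are not lost; in all cases writeback is
 * ended and the bio released.
 */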
static void end_swap_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim so rotate_reclaimable_page() does
		 * not move the page to the tail of the inactive list.
		 */
		set_page_dirty(page);
		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

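/*
 * Completion handler for swap-in bios: mark the page up to date on
 * success or flag an error on failure, then unlock the page so that
 * anyone waiting on it can proceed.
 */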
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);
	bio_put(bio);
}

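/*
 * generic_swapfile_activate - build the extent list for a file-backed
 * swap area by probing the file with bmap().  Only runs of blocks that
 * are PAGE_SIZE long and PAGE_SIZE aligned on disk become extents;
 * where a run is misaligned or discontiguous, the probe advances one
 * block and tries again.  Returns the number of extents added, or a
 * negative errno (-EINVAL if the file has holes).
 */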
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	printk(KERN_ERR "swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret = 0, rw = WRITE;
	struct swap_info_struct *sis = page_swap_info(page);

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}

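	/*
	 * Swap area that lives in a file on a filesystem (SWP_FILE):
	 * bypass the block layer and write the page synchronously
	 * through the filesystem's ->direct_IO() method.
	 */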
	if (sis->flags & SWP_FILE) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct iovec iov = {
			.iov_base = kmap(page),
			.iov_len  = PAGE_SIZE,
		};

		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);
		kiocb.ki_left = PAGE_SIZE;
		kiocb.ki_nbytes = PAGE_SIZE;

		unlock_page(page);
		ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
						&kiocb, &iov,
						kiocb.ki_pos, 1);
		kunmap(page);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		}
		return ret;
	}

	bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		rw |= REQ_SYNC;
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(rw, bio);
out:
	return ret;
}

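/*
 * Read a page back in from swap: try frontswap first, use the
 * filesystem's ->readpage() for file-backed (SWP_FILE) swap areas,
 * and otherwise submit a bio directly to the swap block device.
 */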
int swap_readpage(struct page *page)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageUptodate(page));
	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (sis->flags & SWP_FILE) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		return ret;
	}

	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	count_vm_event(PSWPIN);
	submit_bio(READ, bio);
out:
	return ret;
}

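/*
 * Dirty a swap cache page.  File-backed (SWP_FILE) swap areas need the
 * owning filesystem's ->set_page_dirty(); block-device swap just sets
 * the page's dirty flag with no further accounting.
 */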
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FILE) {
		struct address_space *mapping = sis->swap_file->f_mapping;
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}