/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <asm/pgtable.h>

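/*
 * Allocate a single-page bio aimed at the page's swap slot: map_swap_page()
 * returns the slot's offset (in page-size units) and the backing block
 * device, and the shift by PAGE_SHIFT - 9 converts that offset to 512-byte
 * sectors.  Returns NULL if the bio cannot be allocated with @gfp_flags.
 */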
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		bio_add_page(bio, page, PAGE_SIZE, 0);
		BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE);
	}
	return bio;
}

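/*
 * Completion handler for swap write bios; swap_writepage() passes it to
 * __swap_writepage() as the default end_write_func.  On error the page is
 * redirtied so reclaim will retry the write later; either way, writeback
 * is ended and the bio released.
 */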
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_error) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim so that rotate_reclaimable_page()
		 * does not move the page to the tail of the inactive list
		 * as if its writeback had succeeded.
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     imajor(bio->bi_bdev->bd_inode),
				     iminor(bio->bi_bdev->bd_inode),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding
	 * with this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (!(sis->flags & SWP_BLKDEV))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (e.g. zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory.  So let's free the zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page is written out somewhere again if
	 * we later want to reclaim it.
	 */
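	/*
	 * A driver opts in via the swap_slot_free_notify hook in its
	 * block_device_operations.  As a rough sketch (modelled on zram's
	 * driver, not copied from it), the wiring looks like:
	 *
	 *	static void zram_slot_free_notify(struct block_device *bdev,
	 *					  unsigned long index)
	 *	{
	 *		... free the compressed slot backing @index ...
	 *	}
	 *
	 *	static const struct block_device_operations zram_devops = {
	 *		.swap_slot_free_notify	= zram_slot_free_notify,
	 *		.owner			= THIS_MODULE,
	 *	};
	 */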
	disk = sis->bdev->bd_disk;
	if (disk->fops->swap_slot_free_notify) {
		swp_entry_t entry;
		unsigned long offset;

		entry.val = page_private(page);
		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev,
						  offset);
	}
}

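/*
 * Completion handler for swap read bios: on success, mark the page up to
 * date and give the backing device a chance to free the swap slot; then
 * unlock the page and release the bio.
 */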
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_error) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
			 imajor(bio->bi_bdev->bd_inode),
			 iminor(bio->bi_bdev->bd_inode),
			 (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	bio_put(bio);
}

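/*
 * Build the swap extent list for a swap file by probing it with bmap():
 * each PAGE_SIZE-aligned, physically contiguous run of blocks is recorded
 * as an extent via add_swap_extent().  Returns the number of extents
 * added, or -EINVAL if the file has holes; *span is set to the distance
 * between the lowest and highest disk blocks used, in pages.
 */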
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list.  This code doesn't try
	 * to be very smart.
	 */
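	/*
	 * Worked example (illustrative numbers, not tied to any particular
	 * filesystem): with PAGE_SIZE = 4096 and 1K filesystem blocks,
	 * blkbits = 10 and blocks_per_page = 4.  If probing finds the
	 * aligned, contiguous run of blocks 100..103 backing one page,
	 * then first_block >> (PAGE_SHIFT - blkbits) = 100 >> 2 = 25 is
	 * the page-sized disk block recorded for that swap page.
	 */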
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

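/*
 * Convert the page's offset in swap space to a 512-byte sector number on
 * the underlying block device (shift left by PAGE_SHIFT - 9 bits).
 */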
static sector_t swap_page_sector(struct page *page)
{
	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
}

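/*
 * Write one swap-cache page, trying three paths in order: direct_IO on
 * the backing file for SWP_FILE swap (e.g. swap-over-NFS), the synchronous
 * bdev_write_page() fast path, and finally a regular bio submitted with
 * @end_write_func as its completion handler.
 */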
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (sis->flags & SWP_FILE) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page(), and rate-limit the
			 * messages; do not flag PageError as in the normal
			 * direct-to-bio case, since the failure may be
			 * temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_vm_event(PSWPOUT);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
	else
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
out:
	return ret;
}

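/*
 * Read one page back in from swap, trying frontswap, the SWP_FILE
 * ->readpage() path, the synchronous bdev_read_page() fast path, and a
 * regular bio, in that order.  The caller holds the page lock; it is
 * released when the read completes.  Submission time is accounted as a
 * memory stall via the PSI infrastructure.
 */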
int swap_readpage(struct page *page)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall.  When the device is
	 * congested, or the submitting cgroup is IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	psi_memstall_enter(&pflags);

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (sis->flags & SWP_FILE) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
	if (!ret) {
		if (trylock_page(page)) {
			swap_slot_free_notify(page);
			unlock_page(page);
		}

		count_vm_event(PSWPIN);
		goto out;
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	count_vm_event(PSWPIN);
	submit_bio(bio);
out:
	psi_memstall_leave(&pflags);
	return ret;
}

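/*
 * Dirty a swap-cache page.  Swap files (SWP_FILE) go through the backing
 * filesystem's set_page_dirty so the filesystem can track the page;
 * block-device swap only needs the dirty bit, with no writeback tagging.
 */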
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FILE) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}