/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/android_fs.h>

EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);
EXPORT_TRACEPOINT_SYMBOL(android_fs_fsync_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_fsync_end);

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	if (trace_android_fs_dataread_end_enabled() &&
	    (bio_data_dir(bio) == READ)) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL)
			trace_android_fs_dataread_end(first_page->mapping->host,
						page_offset(first_page),
						bio->bi_iter.bi_size);
	}

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
	}

	bio_put(bio);
}

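/*
 * Attach the mpage completion handler, trim the bio so it cannot run past
 * end-of-device, and hand it to the block layer.  Always returns NULL so
 * callers can write "bio = mpage_bio_submit(...)" to drop their reference.
 */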
static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled() && (op == REQ_OP_READ)) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL) {
			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

			path = android_fstrace_get_pathname(pathbuf,
						MAX_TRACE_PATHBUF_LEN,
						first_page->mapping->host);
			trace_android_fs_dataread_start(
				first_page->mapping->host,
				page_offset(first_page),
				bio->bi_iter.bi_size,
				current->pid,
				path,
				current->comm);
		}
	}
	bio->bi_end_io = mpage_end_io;
	bio_set_op_attrs(bio, op, op_flags);
	guard_bio_eod(op, bio);
	submit_bio(bio);
	return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	/* Restrict the given (page cache) mask for slab allocations */
	gfp_flags &= GFP_KERNEL;
	bio = bio_alloc(gfp_flags, nr_vecs);

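	/*
	 * If we are in the memory reclaim path (PF_MEMALLOC), don't give up:
	 * keep halving the number of vecs until a bio can be allocated.
	 */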
	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = first_sector;
	}
	return bio;
}

/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, i_blocksize(inode), 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructs largest possible bios, submits them for IO if the
 * blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block,
		gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

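	/*
	 * Fully-mapped single-block page: give cleancache a chance to supply
	 * the data; on a hit the page is already up to date and the confused
	 * path below only needs to unlock it.
	 */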
	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);

alloc_new:
	if (bio == NULL) {
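		/*
		 * No hole in the page: first try to hand the whole page to
		 * the device in a single call (the ->rw_page fast path);
		 * on success there is nothing more to do.
		 */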
		if (first_hole == blocks_per_page) {
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								page))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
		goto alloc_new;
	}

	relative_block = block_in_file - *first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

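	/*
	 * Unusual layout (page already has buffers, a hole after a non-hole,
	 * or discontiguous blocks): give up and use the buffer_head-based
	 * read function instead.
	 */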
confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
					gfp)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block, gfp);
		}
		put_page(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(REQ_OP_READ, 0, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);

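/*
 * Illustrative sketch (not part of this file): a block filesystem normally
 * wires mpage_readpages() up through its address_space_operations, along
 * the lines of what ext2 does.  example_readpages and example_get_block
 * below are hypothetical names:
 *
 *	static int example_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return mpage_readpages(mapping, pages, nr_pages,
 *					example_get_block);
 *	}
 */
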
/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block, gfp);
	if (bio)
		mpage_bio_submit(REQ_OP_READ, 0, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

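/*
 * Per-call state shared between mpage_writepages() and each
 * __mpage_writepage() invocation: the bio being assembled, the last block
 * it covers, the block mapper, and whether the confused path may fall
 * back to ->writepage().
 */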
struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
	unsigned use_writepage;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * we cannot drop the bh if the page is not uptodate or a concurrent
	 * readpage would fail to serialize with the bh and it would read from
	 * disk before we reach the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page);
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

alloc_new:
	if (bio == NULL) {
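		/*
		 * Fully-mapped page: first try to write it with a single
		 * call to the device (the ->rw_page fast path) before
		 * falling back to allocating a bio.
		 */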
		if (first_unmapped == blocks_per_page) {
			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
						page, wbc))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;

		wbc_init_bio(wbc, bio);
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_io(wbc, page, PAGE_SIZE);
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		goto alloc_new;
	}

	clean_buffers(page, first_unmapped);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);

	if (!get_block)
		ret = generic_writepages(mapping, wbc);
	else {
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = get_block,
			.use_writepage = 1,
		};

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio) {
			int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
					WRITE_SYNC : 0);
			mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
		}
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);

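/*
 * Illustrative sketch (not part of this file): a filesystem that goes
 * direct-to-BIO typically implements ->writepages() as a thin wrapper,
 * again with hypothetical names:
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, example_get_block);
 *	}
 */
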
int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio) {
		int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
				WRITE_SYNC : 0);
		mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);