/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		page_endio(page, op_is_write(bio_op(bio)),
			   blk_status_to_errno(bio->bi_status));
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	bio_set_op_attrs(bio, op, op_flags);
	guard_bio_eod(op, bio);
	submit_bio(bio);
	return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	/* Restrict the given (page cache) mask for slab allocations */
	gfp_flags &= GFP_KERNEL;
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = first_sector;
	}
	return bio;
}

/*
 * Support function for mpage_readpages.  The fs-supplied get_block might
 * return an up-to-date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, i_blocksize(inode), 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and building the largest possible bios; a bio is submitted for IO
 * as soon as its blocks stop being contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block,
		gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);

alloc_new:
	if (bio == NULL) {
		if (first_hole == blocks_per_page) {
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								page))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
		goto alloc_new;
	}

	relative_block = block_in_file - *first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is to allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
					gfp)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block, gfp);
		}
		put_page(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(REQ_OP_READ, 0, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
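
/*
 * Illustrative sketch, compiled out: a filesystem typically wires
 * mpage_readpages() into its address_space_operations by pairing it with
 * its own block mapper.  "myfs" and myfs_get_block() are hypothetical
 * placeholder names; ext2's ext2_readpages() has this exact shape.
 */
#if 0
static int myfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	/* myfs_get_block() maps file-relative blocks to disk blocks */
	return mpage_readpages(mapping, pages, nr_pages, myfs_get_block);
}
#endif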

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block, gfp);
	if (bio)
		mpage_bio_submit(REQ_OP_READ, 0, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);
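
/*
 * Illustrative sketch, compiled out: the single-page read hook is wired
 * the same way.  myfs_get_block() is again a hypothetical placeholder for
 * the filesystem's get_block_t routine.
 */
#if 0
static int myfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, myfs_get_block);
}
#endif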
417
418/*
419 * Writing is not so simple.
420 *
421 * If the page has buffers then they will be used for obtaining the disk
422 * mapping. We only support pages which are fully mapped-and-dirty, with a
423 * special case for pages which are unmapped at the end: end-of-file.
424 *
425 * If the page has no buffers (preferred) then the page is mapped here.
426 *
427 * If all blocks are found to be contiguous then the page can go into the
428 * BIO. Otherwise fall back to the mapping's writepage().
429 *
430 * FIXME: This code wants an estimate of how many pages are still to be
431 * written, so it can intelligently allocate a suitably-sized BIO. For now,
432 * just allocate full-size (16-page) BIOs.
433 */
Miklos Szeredi0ea97182007-05-10 22:22:51 -0700434
Dmitri Vorobievced117c2009-03-31 00:41:20 +0300435struct mpage_data {
436 struct bio *bio;
437 sector_t last_block_in_bio;
438 get_block_t *get_block;
439 unsigned use_writepage;
440};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We cannot drop the bh if the page is not uptodate: a concurrent
	 * readpage would fail to serialize with the bh, and would end up
	 * reading it from disk before our write reaches the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page);
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	int op_flags = wbc_to_write_flags(wbc);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

548 * Page has buffers, but they are all unmapped. The page was
549 * created by pagein or read over a hole which was handled by
550 * block_read_full_page(). If this address_space is also
551 * using mpage_readpages then this can rarely happen.
552 */
553 goto confused;
554 }
555
556 /*
557 * The page has no buffers: map it to disk
558 */
559 BUG_ON(!PageUptodate(page));
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300560 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561 last_block = (i_size - 1) >> blkbits;
562 map_bh.b_page = page;
563 for (page_block = 0; page_block < blocks_per_page; ) {
564
565 map_bh.b_state = 0;
Badari Pulavartyb0cf2322006-03-26 01:38:00 -0800566 map_bh.b_size = 1 << blkbits;
Miklos Szeredi0ea97182007-05-10 22:22:51 -0700567 if (mpd->get_block(inode, block_in_file, &map_bh, 1))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568 goto confused;
569 if (buffer_new(&map_bh))
Jan Karae64855c2016-11-04 18:08:15 +0100570 clean_bdev_bh_alias(&map_bh);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700571 if (buffer_boundary(&map_bh)) {
572 boundary_block = map_bh.b_blocknr;
573 boundary_bdev = map_bh.b_bdev;
574 }
575 if (page_block) {
576 if (map_bh.b_blocknr != blocks[page_block-1] + 1)
577 goto confused;
578 }
579 blocks[page_block++] = map_bh.b_blocknr;
580 boundary = buffer_boundary(&map_bh);
581 bdev = map_bh.b_bdev;
582 if (block_in_file == last_block)
583 break;
584 block_in_file++;
585 }
586 BUG_ON(page_block == 0);
587
588 first_unmapped = page_block;
589
590page_is_mapped:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300591 end_index = i_size >> PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 if (page->index >= end_index) {
593 /*
594 * The page straddles i_size. It must be zeroed out on each
Adam Buchbinder2a61aa42009-12-11 16:35:40 -0500595 * and every writepage invocation because it may be mmapped.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 * "A file is mapped in multiples of the page size. For a file
597 * that is not a multiple of the page size, the remaining memory
598 * is zeroed when mapped, and writes to that region are not
599 * written out to the file."
600 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300601 unsigned offset = i_size & (PAGE_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700602
603 if (page->index > end_index || !offset)
604 goto confused;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300605 zero_user_segment(page, offset, PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606 }
607
608 /*
609 * This page will go to BIO. Do we need to send this BIO off first?
610 */
Miklos Szeredi0ea97182007-05-10 22:22:51 -0700611 if (bio && mpd->last_block_in_bio != blocks[0] - 1)
Mike Christieeed25cd2016-06-05 14:31:59 -0500612 bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613
614alloc_new:
615 if (bio == NULL) {
Matthew Wilcox47a191f2014-06-04 16:07:46 -0700616 if (first_unmapped == blocks_per_page) {
617 if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
Matthew Wilcoxf8927602017-10-13 15:58:15 -0700618 page, wbc))
Matthew Wilcox47a191f2014-06-04 16:07:46 -0700619 goto out;
Matthew Wilcox47a191f2014-06-04 16:07:46 -0700620 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
Kent Overstreetb54ffb72015-05-19 14:31:01 +0200622 BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700623 if (bio == NULL)
624 goto confused;
Tejun Heo429b3fb2015-05-22 17:14:04 -0400625
Tejun Heob16b1de2015-06-02 08:39:48 -0600626 wbc_init_bio(wbc, bio);
Jens Axboe8e8f9292017-06-27 09:30:05 -0600627 bio->bi_write_hint = inode->i_write_hint;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700628 }
629
630 /*
631 * Must try to add the page before marking the buffer clean or
632 * the confused fail path above (OOM) will be very confused when
633 * it finds all bh marked clean (i.e. it will not write anything)
634 */
Tejun Heo2a814902015-05-28 14:50:51 -0400635 wbc_account_io(wbc, page, PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636 length = first_unmapped << blkbits;
637 if (bio_add_page(bio, page, length, 0) < length) {
Mike Christieeed25cd2016-06-05 14:31:59 -0500638 bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639 goto alloc_new;
640 }
641
Matthew Wilcox90768ee2014-06-04 16:07:44 -0700642 clean_buffers(page, first_unmapped);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643
644 BUG_ON(PageWriteback(page));
645 set_page_writeback(page);
646 unlock_page(page);
647 if (boundary || (first_unmapped != blocks_per_page)) {
Mike Christieeed25cd2016-06-05 14:31:59 -0500648 bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 if (boundary_block) {
650 write_boundary_block(boundary_bdev,
651 boundary_block, 1 << blkbits);
652 }
653 } else {
Miklos Szeredi0ea97182007-05-10 22:22:51 -0700654 mpd->last_block_in_bio = blocks[blocks_per_page - 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700655 }
656 goto out;
657
658confused:
659 if (bio)
Mike Christieeed25cd2016-06-05 14:31:59 -0500660 bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700661
Miklos Szeredi0ea97182007-05-10 22:22:51 -0700662 if (mpd->use_writepage) {
663 ret = mapping->a_ops->writepage(page, wbc);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664 } else {
Miklos Szeredi0ea97182007-05-10 22:22:51 -0700665 ret = -EAGAIN;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 goto out;
667 }
668 /*
669 * The caller has a ref on the inode, so *mapping is stable
670 */
Miklos Szeredi0ea97182007-05-10 22:22:51 -0700671 mapping_set_error(mapping, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700672out:
Miklos Szeredi0ea97182007-05-10 22:22:51 -0700673 mpd->bio = bio;
674 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made gets new I/O started against it.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);

	if (!get_block)
		ret = generic_writepages(mapping, wbc);
	else {
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = get_block,
			.use_writepage = 1,
		};

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio) {
			int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
					REQ_SYNC : 0);
			mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
		}
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
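
/*
 * Illustrative sketch, compiled out: a writepages() implementation built
 * on this helper usually just forwards to mpage_writepages() with the
 * filesystem's block mapper.  myfs_get_block() is a hypothetical
 * placeholder; compare ext2's ext2_writepages().
 */
#if 0
static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, myfs_get_block);
}
#endif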

int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio) {
		int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
				REQ_SYNC : 0);
		mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);
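
/*
 * Illustrative sketch, compiled out: tying the hooks together in an
 * address_space_operations table.  All "myfs" names are hypothetical.
 * Because mpage_writepage() runs with use_writepage == 0, it returns
 * -EAGAIN for a page it cannot handle (leaving the page locked), so a
 * caller built this way must provide its own fallback, e.g. via
 * block_write_full_page().
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = mpage_writepage(page, myfs_get_block, wbc);

	if (ret == -EAGAIN)	/* confused page: write it the slow way */
		ret = block_write_full_page(page, myfs_get_block, wbc);
	return ret;
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.readpages	= myfs_readpages,
	.writepage	= myfs_writepage,
	.writepages	= myfs_writepages,
};
#endif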