/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem-specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				written > 0 ? written : 0,
				flags, &iomap);
	}

	return written ? written : ret;
}

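/*
 * Translate a position in the file into a 512-byte sector on the backing
 * device, based on the disk address of the current mapping.
 */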
static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

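/*
 * Allocate and attach the per-page structure that tracks sub-page uptodate
 * state and outstanding read/write counts when block size < page size. An
 * extra page reference is taken because the page now carries private data.
 */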
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data have
	 * their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;
	unsigned end = offset_in_page(i_size_read(inode)) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (first <= end && last > end)
		plen -= (last - end) * block_size;

	*offp = poff;
	*lenp = plen;
}

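/*
 * Mark the given byte range of the page uptodate in the per-block bitmap and,
 * once every block in the page is uptodate, mark the whole page uptodate.
 */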
static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned int i;
	bool uptodate = true;

	if (iop) {
		for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
			if (i >= first && i <= last)
				set_bit(i, iop->uptodate);
			else if (!test_bit(i, iop->uptodate))
				uptodate = false;
		}
	}

	if (uptodate && !PageError(page))
		SetPageUptodate(page);
}

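/*
 * Read completion helpers: the page is unlocked once all outstanding sub-page
 * reads have completed (or immediately when no iomap_page is attached).
 */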
static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

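/*
 * Copy an IOMAP_INLINE extent into the page and zero the remainder of the
 * page; inline data only ever occupies the first page of a file.
 */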
static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

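/*
 * State shared between ->readpage and ->readpages: the page currently being
 * filled, the bio under construction, and the remaining readahead page list.
 */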
struct iomap_readpage_ctx {
	struct page *cur_page;
	bool cur_page_in_bio;
	bool is_readahead;
	struct bio *bio;
	struct list_head *pages;
};

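/*
 * Read actor called from iomap_apply: fill as much of the current page as the
 * extent allows, either by zeroing (holes, post-EOF blocks), copying inline
 * data, or adding the range to a read bio that is submitted when it becomes
 * full or discontiguous.
 */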
static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
		if (__bio_try_merge_page(ctx->bio, page, plen, poff))
			goto done;
		is_contig = true;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	__bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors. This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

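/*
 * Pull the next page covered by the current range off the readpages list and
 * add it to the page cache, skipping pages that are already present there.
 */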
static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done. Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

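/*
 * ->readpages implementation: walk the mappings covering the page list and
 * hand each range to iomap_readpages_actor, submitting any final bio at the
 * end.
 */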
int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages = pages,
		.is_readahead = true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(), skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

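/*
 * Prepare a page for a buffered write without buffer heads: read in (or zero)
 * any blocks in the range that are not already uptodate and that the write
 * will only partially overwrite.
 */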
static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write. However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a non
	 * uptodate page as a zero-length write, and force the caller to redo
	 * the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page))) {
		copied = 0;
	} else {
		iomap_set_range_uptodate(page, offset_in_page(pos), len);
		iomap_set_page_dirty(page);
	}
	return __generic_write_end(inode, pos, copied, page);
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	__generic_write_end(inode, pos, copied, page);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = generic_write_end(NULL, inode->i_mapping, pos, len,
				copied, page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	if (iomap->page_done)
		iomap->page_done(inode, pos, copied, page, iomap);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

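/*
 * Write actor called from iomap_apply for buffered writes: loop over the
 * extent, copying user data into the page cache one page at a time via
 * iomap_write_begin/iomap_write_end.
 */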
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
				iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
					iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

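/*
 * Actor for iomap_file_dirty: read each page in the range and write it back
 * unchanged through iomap_write_begin/iomap_write_end, which re-dirties the
 * data against the current mapping.
 */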
static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
				AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
			iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

/*
 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
 * Returns true if found and updates @lastoff to the offset in file.
 */
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
		int whence)
{
	const struct address_space_operations *ops = inode->i_mapping->a_ops;
	unsigned int bsize = i_blocksize(inode), off;
	bool seek_data = whence == SEEK_DATA;
	loff_t poff = page_offset(page);

	if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
		return false;

	if (*lastoff < poff) {
		/*
		 * Last offset smaller than the start of the page means we found
		 * a hole:
		 */
		if (whence == SEEK_HOLE)
			return true;
		*lastoff = poff;
	}

	/*
	 * Just check the page unless we can and should check block ranges:
	 */
	if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
		return PageUptodate(page) == seek_data;

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping))
		goto out_unlock_not_found;

	for (off = 0; off < PAGE_SIZE; off += bsize) {
		if (offset_in_page(*lastoff) >= off + bsize)
			continue;
		if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
			unlock_page(page);
			return true;
		}
		*lastoff = poff + off + bsize;
	}

out_unlock_not_found:
	unlock_page(page);
	return false;
}

/*
 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 *
 * Within unwritten extents, the page cache determines which parts are holes
 * and which are data: uptodate buffer heads count as data; everything else
 * counts as a hole.
 *
 * Returns the resulting offset on success, and -ENOENT otherwise.
 */
static loff_t
page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		int whence)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	loff_t lastoff = offset;
	struct pagevec pvec;

	if (length <= 0)
		return -ENOENT;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
				end - 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page_seek_hole_data(inode, page, &lastoff, whence))
				goto check_range;
			lastoff = page_offset(page) + PAGE_SIZE;
		}
		pagevec_release(&pvec);
	} while (index < end);

	/* When no page at lastoff and we are not done, we found a hole. */
	if (whence != SEEK_HOLE)
		goto not_found;

check_range:
	if (lastoff < offset + length)
		goto out;
not_found:
	lastoff = -ENOENT;
out:
	pagevec_release(&pvec);
	return lastoff;
}


static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
				SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				&offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
				SEEK_DATA);
		if (offset < 0)
			return length;
		/*FALLTHRU*/
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				&offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY	(1 << 31)

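/*
 * Per-request direct I/O state: the owning kiocb, completion callback, size
 * accounting, a reference count of outstanding bios, and either the
 * submission/synchronous-wait state or the async completion work item.
 */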
struct iomap_dio {
	struct kiocb *iocb;
	iomap_dio_end_io_t *end_io;
	loff_t i_size;
	loff_t size;
	atomic_t ref;
	unsigned flags;
	int error;
	bool wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter *iter;
			struct task_struct *waiter;
			struct request_queue *last_queue;
			blk_qc_t cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing. Either
	 * one is a pretty crazy thing to do, so we don't support it 100%. If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet. We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

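/*
 * Build and submit bios for one mapped or unwritten extent of a direct I/O
 * request, zeroing unaligned sub-block head and tail ranges of newly
 * allocated or unwritten extents as needed.
 */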
1595static loff_t
Christoph Hellwig09230432018-07-03 09:07:46 -07001596iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1597 struct iomap_dio *dio, struct iomap *iomap)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001598{
Fabian Frederick93407472017-02-27 14:28:32 -08001599 unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
1600 unsigned int fs_block_size = i_blocksize(inode), pad;
1601 unsigned int align = iov_iter_alignment(dio->submit.iter);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001602 struct iov_iter iter;
1603 struct bio *bio;
1604 bool need_zeroout = false;
Dave Chinner3460cac2018-05-02 12:54:53 -07001605 bool use_fua = false;
Dave Chinner807a5972018-11-19 13:31:11 -08001606 int nr_pages, ret = 0;
Al Virocfe057f2017-09-11 21:17:09 +01001607 size_t copied = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001608
1609 if ((pos | length | align) & ((1 << blkbits) - 1))
1610 return -EINVAL;
1611
Christoph Hellwig09230432018-07-03 09:07:46 -07001612 if (iomap->type == IOMAP_UNWRITTEN) {
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001613 dio->flags |= IOMAP_DIO_UNWRITTEN;
1614 need_zeroout = true;
Christoph Hellwig09230432018-07-03 09:07:46 -07001615 }
1616
1617 if (iomap->flags & IOMAP_F_SHARED)
1618 dio->flags |= IOMAP_DIO_COW;
1619
1620 if (iomap->flags & IOMAP_F_NEW) {
1621 need_zeroout = true;
Dave Chinnerac3ec5a2018-11-19 13:31:10 -08001622 } else if (iomap->type == IOMAP_MAPPED) {
Christoph Hellwig09230432018-07-03 09:07:46 -07001623 /*
Dave Chinnerac3ec5a2018-11-19 13:31:10 -08001624 * Use a FUA write if we need datasync semantics, this is a pure
1625 * data IO that doesn't require any metadata updates (including
1626		 * after IO completion such as unwritten extent conversion), and
1627 * the underlying device supports FUA. This allows us to avoid
1628 * cache flushes on IO completion.
Christoph Hellwig09230432018-07-03 09:07:46 -07001629 */
1630 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
1631 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
1632 blk_queue_fua(bdev_get_queue(iomap->bdev)))
1633 use_fua = true;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001634 }
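	/*
	 * Illustration of the FUA decision above: an overwrite of an already
	 * allocated, clean extent (IOMAP_MAPPED with neither IOMAP_F_SHARED
	 * nor IOMAP_F_DIRTY set), issued with O_DSYNC on a device whose queue
	 * advertises FUA, ends up with use_fua == true, so the data bios
	 * carry REQ_FUA and no cache flush is required at I/O completion.
	 */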
1635
1636 /*
1637 * Operate on a partial iter trimmed to the extent we were called for.
1638 * We'll update the iter in the dio once we're done with this extent.
1639 */
1640 iter = *dio->submit.iter;
1641 iov_iter_truncate(&iter, length);
1642
1643 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1644 if (nr_pages <= 0)
1645 return nr_pages;
1646
1647 if (need_zeroout) {
1648 /* zero out from the start of the block to the write offset */
1649 pad = pos & (fs_block_size - 1);
1650 if (pad)
1651 iomap_dio_zero(dio, iomap, pos - pad, pad);
1652 }
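	/*
	 * For instance, with a 4096-byte filesystem block and a write
	 * starting at pos = 5120 into a newly allocated block:
	 *
	 *	pad = 5120 & (4096 - 1) = 1024
	 *	iomap_dio_zero(dio, iomap, 5120 - 1024, 1024);
	 *
	 * i.e. bytes 4096..5119 of the block are zeroed before the data is
	 * written.
	 */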
1653
1654 do {
Al Virocfe057f2017-09-11 21:17:09 +01001655 size_t n;
1656 if (dio->error) {
1657 iov_iter_revert(dio->submit.iter, copied);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001658 return 0;
Al Virocfe057f2017-09-11 21:17:09 +01001659 }
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001660
1661 bio = bio_alloc(GFP_KERNEL, nr_pages);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001662 bio_set_dev(bio, iomap->bdev);
Christoph Hellwig57fc5052018-06-01 09:03:08 -07001663 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
Jens Axboe45d06cf2017-06-27 11:01:22 -06001664 bio->bi_write_hint = dio->iocb->ki_hint;
Adam Manzanares087e5662018-05-22 10:52:21 -07001665 bio->bi_ioprio = dio->iocb->ki_ioprio;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001666 bio->bi_private = dio;
1667 bio->bi_end_io = iomap_dio_bio_end_io;
1668
1669 ret = bio_iov_iter_get_pages(bio, &iter);
1670 if (unlikely(ret)) {
Dave Chinner807a5972018-11-19 13:31:11 -08001671 /*
1672 * We have to stop part way through an IO. We must fall
1673 * through to the sub-block tail zeroing here, otherwise
1674 * this short IO may expose stale data in the tail of
1675 * the block we haven't written data to.
1676 */
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001677 bio_put(bio);
Dave Chinner807a5972018-11-19 13:31:11 -08001678 goto zero_tail;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001679 }
1680
Al Virocfe057f2017-09-11 21:17:09 +01001681 n = bio->bi_iter.bi_size;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001682 if (dio->flags & IOMAP_DIO_WRITE) {
Dave Chinner3460cac2018-05-02 12:54:53 -07001683 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
1684 if (use_fua)
1685 bio->bi_opf |= REQ_FUA;
1686 else
1687 dio->flags &= ~IOMAP_DIO_WRITE_FUA;
Al Virocfe057f2017-09-11 21:17:09 +01001688 task_io_account_write(n);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001689 } else {
Dave Chinner3460cac2018-05-02 12:54:53 -07001690 bio->bi_opf = REQ_OP_READ;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001691 if (dio->flags & IOMAP_DIO_DIRTY)
1692 bio_set_pages_dirty(bio);
1693 }
1694
Al Virocfe057f2017-09-11 21:17:09 +01001695 iov_iter_advance(dio->submit.iter, n);
1696
1697 dio->size += n;
1698 pos += n;
1699 copied += n;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001700
1701 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1702
1703 atomic_inc(&dio->ref);
1704
1705 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
1706 dio->submit.cookie = submit_bio(bio);
1707 } while (nr_pages);
1708
Dave Chinnerb61fdcd2018-11-19 13:31:10 -08001709 /*
1710 * We need to zeroout the tail of a sub-block write if the extent type
1711 * requires zeroing or the write extends beyond EOF. If we don't zero
1712 * the block tail in the latter case, we can expose stale data via mmap
1713 * reads of the EOF block.
1714 */
Dave Chinner807a5972018-11-19 13:31:11 -08001715zero_tail:
Dave Chinnerb61fdcd2018-11-19 13:31:10 -08001716 if (need_zeroout ||
1717 ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001718 /* zero out from the end of the write to the end of the block */
1719 pad = pos & (fs_block_size - 1);
1720 if (pad)
1721 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
1722 }
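	/*
	 * Tail example: if the write above ended at pos = 6656 within a
	 * 4096-byte block and needs zeroing (say it extended EOF):
	 *
	 *	pad = 6656 & (4096 - 1) = 2560
	 *	iomap_dio_zero(dio, iomap, 6656, 4096 - 2560);
	 *
	 * i.e. bytes 6656..8191 are zeroed so the remainder of the block
	 * never exposes stale data.
	 */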
Dave Chinner807a5972018-11-19 13:31:11 -08001723 return copied ? copied : ret;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001724}
1725
Christoph Hellwig09230432018-07-03 09:07:46 -07001726static loff_t
1727iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
1728{
1729 length = iov_iter_zero(length, dio->submit.iter);
1730 dio->size += length;
1731 return length;
1732}
1733
1734static loff_t
Andreas Gruenbacherec181f62018-07-03 09:07:47 -07001735iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
1736 struct iomap_dio *dio, struct iomap *iomap)
1737{
1738 struct iov_iter *iter = dio->submit.iter;
1739 size_t copied;
1740
1741 BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
1742
1743 if (dio->flags & IOMAP_DIO_WRITE) {
1744 loff_t size = inode->i_size;
1745
1746 if (pos > size)
1747 memset(iomap->inline_data + size, 0, pos - size);
1748 copied = copy_from_iter(iomap->inline_data + pos, length, iter);
1749 if (copied) {
1750 if (pos + copied > size)
1751 i_size_write(inode, pos + copied);
1752 mark_inode_dirty(inode);
1753 }
1754 } else {
1755 copied = copy_to_iter(iomap->inline_data + pos, length, iter);
1756 }
1757 dio->size += copied;
1758 return copied;
1759}
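/*
 * Inline example: with i_size = 100 and a 20-byte write at pos = 120, the
 * code above first zeroes bytes 100..119 of the inline area, copies the new
 * data in at offset 120, and then moves i_size to 140 before marking the
 * inode dirty.
 */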
1760
1761static loff_t
Christoph Hellwig09230432018-07-03 09:07:46 -07001762iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
1763 void *data, struct iomap *iomap)
1764{
1765 struct iomap_dio *dio = data;
1766
1767 switch (iomap->type) {
1768 case IOMAP_HOLE:
1769 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
1770 return -EIO;
1771 return iomap_dio_hole_actor(length, dio);
1772 case IOMAP_UNWRITTEN:
1773 if (!(dio->flags & IOMAP_DIO_WRITE))
1774 return iomap_dio_hole_actor(length, dio);
1775 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
1776 case IOMAP_MAPPED:
1777 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
Andreas Gruenbacherec181f62018-07-03 09:07:47 -07001778 case IOMAP_INLINE:
1779 return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
Christoph Hellwig09230432018-07-03 09:07:46 -07001780 default:
1781 WARN_ON_ONCE(1);
1782 return -EIO;
1783 }
1784}
1785
Dave Chinner4f8ff442018-05-02 12:54:52 -07001786/*
1787 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
Dave Chinner3460cac2018-05-02 12:54:53 -07001788 * is being issued as AIO or not. This allows us to optimise pure data writes
1789 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
1790 * REQ_FLUSH post write. This is slightly tricky because a single request here
1791 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
1792 * may be pure data writes. In that case, we still need to do a full data sync
1793 * completion.
Dave Chinner4f8ff442018-05-02 12:54:52 -07001794 */
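/*
 * A typical call site looks roughly like the sketch below (XFS's direct
 * write path is used as the example; the iomap ops and end_io callback are
 * whatever the filesystem provides).  Callers must hold i_rwsem, see the
 * lockdep_assert_held() below.
 *
 *	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
 */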
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001795ssize_t
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001796iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1797 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001798{
1799 struct address_space *mapping = iocb->ki_filp->f_mapping;
1800 struct inode *inode = file_inode(iocb->ki_filp);
1801 size_t count = iov_iter_count(iter);
Eryu Guanc771c142017-03-02 15:02:06 -08001802 loff_t pos = iocb->ki_pos, start = pos;
1803 loff_t end = iocb->ki_pos + count - 1, ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001804 unsigned int flags = IOMAP_DIRECT;
Christoph Hellwigd9ba8422019-01-17 08:58:58 -08001805 bool wait_for_completion = is_sync_kiocb(iocb);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001806 struct blk_plug plug;
1807 struct iomap_dio *dio;
1808
1809 lockdep_assert_held(&inode->i_rwsem);
1810
1811 if (!count)
1812 return 0;
1813
1814 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1815 if (!dio)
1816 return -ENOMEM;
1817
1818 dio->iocb = iocb;
1819 atomic_set(&dio->ref, 1);
1820 dio->size = 0;
1821 dio->i_size = i_size_read(inode);
1822 dio->end_io = end_io;
1823 dio->error = 0;
1824 dio->flags = 0;
1825
1826 dio->submit.iter = iter;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001827 dio->submit.waiter = current;
1828 dio->submit.cookie = BLK_QC_T_NONE;
1829 dio->submit.last_queue = NULL;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001830
1831 if (iov_iter_rw(iter) == READ) {
1832 if (pos >= dio->i_size)
1833 goto out_free_dio;
1834
1835 if (iter->type == ITER_IOVEC)
1836 dio->flags |= IOMAP_DIO_DIRTY;
1837 } else {
Dave Chinner3460cac2018-05-02 12:54:53 -07001838 flags |= IOMAP_WRITE;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001839 dio->flags |= IOMAP_DIO_WRITE;
Dave Chinner3460cac2018-05-02 12:54:53 -07001840
1841 /* for data sync or sync, we need sync completion processing */
Dave Chinner4f8ff442018-05-02 12:54:52 -07001842 if (iocb->ki_flags & IOCB_DSYNC)
1843 dio->flags |= IOMAP_DIO_NEED_SYNC;
Dave Chinner3460cac2018-05-02 12:54:53 -07001844
1845 /*
1846		 * For datasync-only writes, we optimistically try using FUA for
1847 * this IO. Any non-FUA write that occurs will clear this flag,
1848 * hence we know before completion whether a cache flush is
1849 * necessary.
1850 */
1851 if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
1852 dio->flags |= IOMAP_DIO_WRITE_FUA;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001853 }
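	/*
	 * Illustrative flag combinations resulting from the block above:
	 *
	 *	O_DSYNC only:	IOCB_DSYNC		-> NEED_SYNC, FUA candidate
	 *	O_SYNC:		IOCB_DSYNC|IOCB_SYNC	-> NEED_SYNC, no FUA attempt
	 *	neither:	no sync flags		-> no sync completion work
	 */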
1854
Goldwyn Rodriguesa38d1242017-06-20 07:05:45 -05001855 if (iocb->ki_flags & IOCB_NOWAIT) {
1856 if (filemap_range_has_page(mapping, start, end)) {
1857 ret = -EAGAIN;
1858 goto out_free_dio;
1859 }
1860 flags |= IOMAP_NOWAIT;
1861 }
1862
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001863 ret = filemap_write_and_wait_range(mapping, start, end);
1864 if (ret)
1865 goto out_free_dio;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001866
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001867 /*
1868 * Try to invalidate cache pages for the range we're direct
1869 * writing. If this invalidation fails, tough, the write will
1870 * still work, but racing two incompatible write paths is a
1871 * pretty crazy thing to do, so we don't support it 100%.
1872 */
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001873 ret = invalidate_inode_pages2_range(mapping,
1874 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001875 if (ret)
1876 dio_warn_stale_pagecache(iocb->ki_filp);
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001877 ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001878
Christoph Hellwigd9ba8422019-01-17 08:58:58 -08001879 if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
Chandan Rajendra546e7be2017-09-22 11:47:33 -07001880 !inode->i_sb->s_dio_done_wq) {
1881 ret = sb_init_dio_done_wq(inode->i_sb);
1882 if (ret < 0)
1883 goto out_free_dio;
1884 }
1885
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001886 inode_dio_begin(inode);
1887
1888 blk_start_plug(&plug);
1889 do {
1890 ret = iomap_apply(inode, pos, count, flags, ops, dio,
1891 iomap_dio_actor);
1892 if (ret <= 0) {
1893 /* magic error code to fall back to buffered I/O */
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001894 if (ret == -ENOTBLK) {
Christoph Hellwigd9ba8422019-01-17 08:58:58 -08001895 wait_for_completion = true;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001896 ret = 0;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001897 }
Dave Chinner807a5972018-11-19 13:31:11 -08001898
1899 /*
1900 * Splicing to pipes can fail on a full pipe. We have to
1901			 * swallow this to make it look like a short IO;
1902 * otherwise the higher splice layers will completely
1903 * mishandle the error and stop moving data.
1904 */
1905 if (ret == -EFAULT)
1906 ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001907 break;
1908 }
1909 pos += ret;
Chandan Rajendraa008c312017-04-12 11:03:20 -07001910
1911 if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
1912 break;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001913 } while ((count = iov_iter_count(iter)) > 0);
1914 blk_finish_plug(&plug);
1915
1916 if (ret < 0)
1917 iomap_dio_set_error(dio, ret);
1918
Dave Chinner3460cac2018-05-02 12:54:53 -07001919 /*
1920 * If all the writes we issued were FUA, we don't need to flush the
1921 * cache on IO completion. Clear the sync flag for this case.
1922 */
1923 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1924 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1925
Christoph Hellwigd9ba8422019-01-17 08:58:58 -08001926 /*
1927 * We are about to drop our additional submission reference, which
1928	 * might be the last reference to the dio. There are three
1929 * different ways we can progress here:
1930 *
1931 * (a) If this is the last reference we will always complete and free
1932 * the dio ourselves.
1933 * (b) If this is not the last reference, and we serve an asynchronous
1934 * iocb, we must never touch the dio after the decrement, the
1935 * I/O completion handler will complete and free it.
1936 * (c) If this is not the last reference, but we serve a synchronous
1937 * iocb, the I/O completion handler will wake us up on the drop
1938 * of the final reference, and we will complete and free it here
1939 * after we got woken by the I/O completion handler.
1940 */
1941 dio->wait_for_completion = wait_for_completion;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001942 if (!atomic_dec_and_test(&dio->ref)) {
Christoph Hellwigd9ba8422019-01-17 08:58:58 -08001943 if (!wait_for_completion)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001944 return -EIOCBQUEUED;
1945
1946 for (;;) {
1947 set_current_state(TASK_UNINTERRUPTIBLE);
1948 if (!READ_ONCE(dio->submit.waiter))
1949 break;
1950
1951 if (!(iocb->ki_flags & IOCB_HIPRI) ||
1952 !dio->submit.last_queue ||
Christoph Hellwigea435e12017-11-02 21:29:54 +03001953 !blk_poll(dio->submit.last_queue,
Linus Torvalds5cc60ae2016-12-14 21:35:31 -08001954 dio->submit.cookie))
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001955 io_schedule();
1956 }
1957 __set_current_state(TASK_RUNNING);
1958 }
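	/*
	 * In the wait loop above, IOCB_HIPRI requests busy-poll the last
	 * submitted queue via blk_poll() rather than sleeping; everything
	 * else simply io_schedule()s until iomap_dio_bio_end_io() clears
	 * dio->submit.waiter and wakes us for the final completion.
	 */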
1959
Christoph Hellwigd9ba8422019-01-17 08:58:58 -08001960 return iomap_dio_complete(dio);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001961
1962out_free_dio:
1963 kfree(dio);
1964 return ret;
1965}
1966EXPORT_SYMBOL_GPL(iomap_dio_rw);
Darrick J. Wong67482122018-05-10 08:38:15 -07001967
1968/* Swapfile activation */
1969
1970#ifdef CONFIG_SWAP
1971struct iomap_swapfile_info {
1972 struct iomap iomap; /* accumulated iomap */
1973 struct swap_info_struct *sis;
1974 uint64_t lowest_ppage; /* lowest physical addr seen (pages) */
1975 uint64_t highest_ppage; /* highest physical addr seen (pages) */
1976 unsigned long nr_pages; /* number of pages collected */
1977 int nr_extents; /* extent count */
1978};
1979
1980/*
1981 * Collect physical extents for this swap file. Physical extents reported to
1982 * the swap code must be trimmed to align to a page boundary. The logical
1983 * offset within the file is irrelevant since the swapfile code maps logical
1984 * page numbers of the swap device to the physical page-aligned extents.
1985 */
1986static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
1987{
1988 struct iomap *iomap = &isi->iomap;
1989 unsigned long nr_pages;
1990 uint64_t first_ppage;
1991 uint64_t first_ppage_reported;
1992 uint64_t next_ppage;
1993 int error;
1994
1995 /*
1996 * Round the start up and the end down so that the physical
1997 * extent aligns to a page boundary.
1998 */
1999 first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
2000 next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
2001 PAGE_SHIFT;
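	/*
	 * Rounding example with PAGE_SIZE = 4096: an extent at addr 0x11200
	 * with length 0x3000 spans disk bytes [0x11200, 0x14200); rounding
	 * inwards gives first_ppage = 0x12 and next_ppage = 0x14, i.e. two
	 * whole physical pages usable for swap.
	 */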
2002
2003 /* Skip too-short physical extents. */
2004 if (first_ppage >= next_ppage)
2005 return 0;
2006 nr_pages = next_ppage - first_ppage;
2007
2008 /*
2009 * Calculate how much swap space we're adding; the first page contains
2010 * the swap header and doesn't count. The mm still wants that first
2011 * page fed to add_swap_extent, however.
2012 */
2013 first_ppage_reported = first_ppage;
2014 if (iomap->offset == 0)
2015 first_ppage_reported++;
2016 if (isi->lowest_ppage > first_ppage_reported)
2017 isi->lowest_ppage = first_ppage_reported;
2018 if (isi->highest_ppage < (next_ppage - 1))
2019 isi->highest_ppage = next_ppage - 1;
2020
2021 /* Add extent, set up for the next call. */
2022 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
2023 if (error < 0)
2024 return error;
2025 isi->nr_extents += error;
2026 isi->nr_pages += nr_pages;
2027 return 0;
2028}
2029
2030/*
2031 * Accumulate iomaps for this swap file. We have to accumulate iomaps because
2032 * swap only cares about contiguous page-aligned physical extents and makes no
2033 * distinction between written and unwritten extents.
2034 */
2035static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
2036 loff_t count, void *data, struct iomap *iomap)
2037{
2038 struct iomap_swapfile_info *isi = data;
2039 int error;
2040
Christoph Hellwig19319b52018-06-01 09:03:06 -07002041 switch (iomap->type) {
2042 case IOMAP_MAPPED:
2043 case IOMAP_UNWRITTEN:
2044 /* Only real or unwritten extents. */
2045 break;
2046 case IOMAP_INLINE:
2047 /* No inline data. */
Omar Sandovalec601922018-05-16 11:13:34 -07002048 pr_err("swapon: file is inline\n");
2049 return -EINVAL;
Christoph Hellwig19319b52018-06-01 09:03:06 -07002050 default:
Omar Sandovalec601922018-05-16 11:13:34 -07002051 pr_err("swapon: file has unallocated extents\n");
2052 return -EINVAL;
2053 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002054
Omar Sandovalec601922018-05-16 11:13:34 -07002055 /* No uncommitted metadata or shared blocks. */
2056 if (iomap->flags & IOMAP_F_DIRTY) {
2057 pr_err("swapon: file is not committed\n");
2058 return -EINVAL;
2059 }
2060 if (iomap->flags & IOMAP_F_SHARED) {
2061 pr_err("swapon: file has shared extents\n");
2062 return -EINVAL;
2063 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002064
Omar Sandovalec601922018-05-16 11:13:34 -07002065 /* Only one bdev per swap file. */
2066 if (iomap->bdev != isi->sis->bdev) {
2067 pr_err("swapon: file is on multiple devices\n");
2068 return -EINVAL;
2069 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002070
2071 if (isi->iomap.length == 0) {
2072 /* No accumulated extent, so just store it. */
2073 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2074 } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
2075 /* Append this to the accumulated extent. */
2076 isi->iomap.length += iomap->length;
2077 } else {
2078 /* Otherwise, add the retained iomap and store this one. */
2079 error = iomap_swapfile_add_extent(isi);
2080 if (error)
2081 return error;
2082 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2083 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002084 return count;
Darrick J. Wong67482122018-05-10 08:38:15 -07002085}
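/*
 * For example, if two successive iomaps are physically contiguous, say addr
 * 0x10000 with length 0x8000 followed by addr 0x18000 with length 0x4000,
 * the second call above only extends isi->iomap.length to 0xc000, and a
 * single extent covering both is handed to iomap_swapfile_add_extent()
 * later.
 */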
2086
2087/*
2088 * Iterate a swap file's iomaps to construct physical extents that can be
2089 * passed to the swapfile subsystem.
2090 */
2091int iomap_swapfile_activate(struct swap_info_struct *sis,
2092 struct file *swap_file, sector_t *pagespan,
2093 const struct iomap_ops *ops)
2094{
2095 struct iomap_swapfile_info isi = {
2096 .sis = sis,
2097 .lowest_ppage = (sector_t)-1ULL,
2098 };
2099 struct address_space *mapping = swap_file->f_mapping;
2100 struct inode *inode = mapping->host;
2101 loff_t pos = 0;
2102 loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
2103 loff_t ret;
2104
Darrick J. Wong117a1482018-06-05 09:53:05 -07002105 /*
2106 * Persist all file mapping metadata so that we won't have any
2107 * IOMAP_F_DIRTY iomaps.
2108 */
2109 ret = vfs_fsync(swap_file, 1);
Darrick J. Wong67482122018-05-10 08:38:15 -07002110 if (ret)
2111 return ret;
2112
2113 while (len > 0) {
2114 ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
2115 ops, &isi, iomap_swapfile_activate_actor);
2116 if (ret <= 0)
2117 return ret;
2118
2119 pos += ret;
2120 len -= ret;
2121 }
2122
2123 if (isi.iomap.length) {
2124 ret = iomap_swapfile_add_extent(&isi);
2125 if (ret)
2126 return ret;
2127 }
2128
2129 *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
2130 sis->max = isi.nr_pages;
2131 sis->pages = isi.nr_pages - 1;
2132 sis->highest_bit = isi.nr_pages - 1;
2133 return isi.nr_extents;
2134}
2135EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
2136#endif /* CONFIG_SWAP */
Christoph Hellwig89eb1902018-06-01 09:03:08 -07002137
2138static loff_t
2139iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
2140 void *data, struct iomap *iomap)
2141{
2142 sector_t *bno = data, addr;
2143
2144 if (iomap->type == IOMAP_MAPPED) {
2145 addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
2146 if (addr > INT_MAX)
2147 WARN(1, "would truncate bmap result\n");
2148 else
2149 *bno = addr;
2150 }
2151 return 0;
2152}
2153
2154/* legacy ->bmap interface. 0 is the error return (!) */
2155sector_t
2156iomap_bmap(struct address_space *mapping, sector_t bno,
2157 const struct iomap_ops *ops)
2158{
2159 struct inode *inode = mapping->host;
Eric Sandeen79b3dbe2018-08-02 13:09:27 -07002160 loff_t pos = bno << inode->i_blkbits;
Christoph Hellwig89eb1902018-06-01 09:03:08 -07002161 unsigned blocksize = i_blocksize(inode);
2162
2163 if (filemap_write_and_wait(mapping))
2164 return 0;
2165
2166 bno = 0;
2167 iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
2168 return bno;
2169}
2170EXPORT_SYMBOL_GPL(iomap_bmap);
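/*
 * A minimal ->bmap hook built on the helper above might look like the
 * sketch below (foo_iomap_ops stands in for the filesystem's own iomap ops
 * and is not defined here):
 *
 *	static sector_t foo_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return iomap_bmap(mapping, block, &foo_iomap_ops);
 *	}
 */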