/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem-specific
 * locking per page.  Instead, all the operations are amortised over the entire
 * range of pages.  It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
Christoph Hellwigbefb5032016-09-19 11:24:49 +100048loff_t
Christoph Hellwigae259a92016-06-21 09:23:11 +100049iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -080050 const struct iomap_ops *ops, void *data, iomap_actor_t actor)
Christoph Hellwigae259a92016-06-21 09:23:11 +100051{
52 struct iomap iomap = { 0 };
53 loff_t written = 0, ret;
54
	/*
	 * Need to map a range from the start position for length bytes.  This
	 * can span multiple pages - it is only guaranteed to return a range of
	 * a single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten).  Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on.  Once we copy
	 * the data into the page cache pages, we cannot fail any more, as we
	 * would otherwise expose transient stale data.  If the reserve fails,
	 * we can safely back out at this point as there is nothing to undo.
	 */
67 ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
68 if (ret)
69 return ret;
70 if (WARN_ON(iomap.offset > pos))
71 return -EIO;
Darrick J. Wong0c6dda72018-01-26 11:11:20 -080072 if (WARN_ON(iomap.length == 0))
73 return -EIO;
Christoph Hellwigae259a92016-06-21 09:23:11 +100074
75 /*
76 * Cut down the length to the one actually provided by the filesystem,
77 * as it might not be able to give us the whole size that we requested.
78 */
79 if (iomap.offset + iomap.length < pos + length)
80 length = iomap.offset + iomap.length - pos;
81
	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
87 written = actor(inode, pos, length, data, &iomap);
88
89 /*
90 * Now the data has been copied, commit the range we've copied. This
91 * should not fail unless the filesystem has had a fatal error.
92 */
Christoph Hellwigf20ac7a2016-08-17 08:42:34 +100093 if (ops->iomap_end) {
94 ret = ops->iomap_end(inode, pos, length,
95 written > 0 ? written : 0,
96 flags, &iomap);
97 }
Christoph Hellwigae259a92016-06-21 09:23:11 +100098
99 return written ? written : ret;
100}
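
/*
 * Illustrative sketch, not part of this file: a filesystem supplies an
 * iomap_ops instance and passes it to the exported iomap_* helpers below,
 * which drive their actors through iomap_apply().  The myfs_* names are
 * hypothetical placeholders:
 *
 *	static const struct iomap_ops myfs_iomap_ops = {
 *		.iomap_begin	= myfs_iomap_begin,
 *		.iomap_end	= myfs_iomap_end,
 *	};
 *
 * ->iomap_begin maps or allocates the requested range; ->iomap_end commits
 * the written portion and releases whatever ->iomap_begin reserved.
 */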
101
Christoph Hellwig57fc5052018-06-01 09:03:08 -0700102static sector_t
103iomap_sector(struct iomap *iomap, loff_t pos)
104{
105 return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
106}
107
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700108static struct iomap_page *
109iomap_page_create(struct inode *inode, struct page *page)
110{
111 struct iomap_page *iop = to_iomap_page(page);
112
113 if (iop || i_blocksize(inode) == PAGE_SIZE)
114 return iop;
115
116 iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
117 atomic_set(&iop->read_count, 0);
118 atomic_set(&iop->write_count, 0);
119 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
120 set_page_private(page, (unsigned long)iop);
121 SetPagePrivate(page);
122 return iop;
123}
124
125static void
126iomap_page_release(struct page *page)
127{
128 struct iomap_page *iop = to_iomap_page(page);
129
130 if (!iop)
131 return;
132 WARN_ON_ONCE(atomic_read(&iop->read_count));
133 WARN_ON_ONCE(atomic_read(&iop->write_count));
134 ClearPagePrivate(page);
135 set_page_private(page, 0);
136 kfree(iop);
137}
138
139/*
140 * Calculate the range inside the page that we actually need to read.
141 */
142static void
143iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
144 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
145{
146 unsigned block_bits = inode->i_blkbits;
147 unsigned block_size = (1 << block_bits);
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700148 unsigned poff = offset_in_page(*pos);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700149 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
150 unsigned first = poff >> block_bits;
151 unsigned last = (poff + plen - 1) >> block_bits;
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700152 unsigned end = offset_in_page(i_size_read(inode)) >> block_bits;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700153
154 /*
155 * If the block size is smaller than the page size we need to check the
156 * per-block uptodate status and adjust the offset and length if needed
157 * to avoid reading in already uptodate ranges.
158 */
159 if (iop) {
160 unsigned int i;
161
162 /* move forward for each leading block marked uptodate */
163 for (i = first; i <= last; i++) {
164 if (!test_bit(i, iop->uptodate))
165 break;
166 *pos += block_size;
167 poff += block_size;
168 plen -= block_size;
169 first++;
170 }
171
172 /* truncate len if we find any trailing uptodate block(s) */
173 for ( ; i <= last; i++) {
174 if (test_bit(i, iop->uptodate)) {
175 plen -= (last - i + 1) * block_size;
176 last = i - 1;
177 break;
178 }
179 }
180 }
181
182 /*
183 * If the extent spans the block that contains the i_size we need to
184 * handle both halves separately so that we properly zero data in the
185 * page cache for blocks that are entirely outside of i_size.
186 */
187 if (first <= end && last > end)
188 plen -= (last - end) * block_size;
189
190 *offp = poff;
191 *lenp = plen;
192}
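
/*
 * Worked example (assuming a 4k page and 1k blocks): for a read covering the
 * whole page where blocks 0 and 3 are already marked in iop->uptodate, the
 * first loop above advances *pos/poff past block 0 and the second loop trims
 * the trailing uptodate block, so the caller ends up reading only blocks 1
 * and 2, i.e. *offp = 1024 and *lenp = 2048.
 */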
193
194static void
195iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
196{
197 struct iomap_page *iop = to_iomap_page(page);
198 struct inode *inode = page->mapping->host;
199 unsigned first = off >> inode->i_blkbits;
200 unsigned last = (off + len - 1) >> inode->i_blkbits;
201 unsigned int i;
202 bool uptodate = true;
203
204 if (iop) {
205 for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
206 if (i >= first && i <= last)
207 set_bit(i, iop->uptodate);
208 else if (!test_bit(i, iop->uptodate))
209 uptodate = false;
210 }
211 }
212
213 if (uptodate && !PageError(page))
214 SetPageUptodate(page);
215}
216
217static void
218iomap_read_finish(struct iomap_page *iop, struct page *page)
219{
220 if (!iop || atomic_dec_and_test(&iop->read_count))
221 unlock_page(page);
222}
223
224static void
225iomap_read_page_end_io(struct bio_vec *bvec, int error)
226{
227 struct page *page = bvec->bv_page;
228 struct iomap_page *iop = to_iomap_page(page);
229
230 if (unlikely(error)) {
231 ClearPageUptodate(page);
232 SetPageError(page);
233 } else {
234 iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
235 }
236
237 iomap_read_finish(iop, page);
238}
239
Christoph Hellwigae259a92016-06-21 09:23:11 +1000240static void
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700241iomap_read_inline_data(struct inode *inode, struct page *page,
242 struct iomap *iomap)
243{
244 size_t size = i_size_read(inode);
245 void *addr;
246
247 if (PageUptodate(page))
248 return;
249
250 BUG_ON(page->index);
251 BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
252
253 addr = kmap_atomic(page);
254 memcpy(addr, iomap->inline_data, size);
255 memset(addr + size, 0, PAGE_SIZE - size);
256 kunmap_atomic(addr);
257 SetPageUptodate(page);
258}
259
260static void
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700261iomap_read_end_io(struct bio *bio)
262{
263 int error = blk_status_to_errno(bio->bi_status);
264 struct bio_vec *bvec;
265 int i;
266
267 bio_for_each_segment_all(bvec, bio, i)
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700268 iomap_read_page_end_io(bvec, error);
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700269 bio_put(bio);
270}
271
272struct iomap_readpage_ctx {
273 struct page *cur_page;
274 bool cur_page_in_bio;
275 bool is_readahead;
276 struct bio *bio;
277 struct list_head *pages;
278};
279
280static loff_t
281iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
282 struct iomap *iomap)
283{
284 struct iomap_readpage_ctx *ctx = data;
285 struct page *page = ctx->cur_page;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700286 struct iomap_page *iop = iomap_page_create(inode, page);
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700287 bool is_contig = false;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700288 loff_t orig_pos = pos;
289 unsigned poff, plen;
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700290 sector_t sector;
291
Andreas Gruenbacher806a1472018-07-03 09:07:47 -0700292 if (iomap->type == IOMAP_INLINE) {
Darrick J. Wong7d5e0492018-08-10 17:55:57 -0700293 WARN_ON_ONCE(pos);
Andreas Gruenbacher806a1472018-07-03 09:07:47 -0700294 iomap_read_inline_data(inode, page, iomap);
295 return PAGE_SIZE;
296 }
297
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700298 /* zero post-eof blocks as the page may be mapped */
299 iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
300 if (plen == 0)
301 goto done;
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700302
303 if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
304 zero_user(page, poff, plen);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700305 iomap_set_range_uptodate(page, poff, plen);
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700306 goto done;
307 }
308
309 ctx->cur_page_in_bio = true;
310
311 /*
312 * Try to merge into a previous segment if we can.
313 */
314 sector = iomap_sector(iomap, pos);
315 if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
316 if (__bio_try_merge_page(ctx->bio, page, plen, poff))
317 goto done;
318 is_contig = true;
319 }
320
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700321 /*
322 * If we start a new segment we need to increase the read count, and we
323 * need to do so before submitting any previous full bio to make sure
324 * that we don't prematurely unlock the page.
325 */
326 if (iop)
327 atomic_inc(&iop->read_count);
328
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700329 if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
330 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
331 int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
332
333 if (ctx->bio)
334 submit_bio(ctx->bio);
335
336 if (ctx->is_readahead) /* same as readahead_gfp_mask */
337 gfp |= __GFP_NORETRY | __GFP_NOWARN;
338 ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
339 ctx->bio->bi_opf = REQ_OP_READ;
340 if (ctx->is_readahead)
341 ctx->bio->bi_opf |= REQ_RAHEAD;
342 ctx->bio->bi_iter.bi_sector = sector;
343 bio_set_dev(ctx->bio, iomap->bdev);
344 ctx->bio->bi_end_io = iomap_read_end_io;
345 }
346
347 __bio_add_page(ctx->bio, page, plen, poff);
348done:
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700349 /*
350 * Move the caller beyond our range so that it keeps making progress.
351 * For that we have to include any leading non-uptodate ranges, but
352 * we can skip trailing ones as they will be handled in the next
353 * iteration.
354 */
355 return pos - orig_pos + plen;
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700356}
357
358int
359iomap_readpage(struct page *page, const struct iomap_ops *ops)
360{
361 struct iomap_readpage_ctx ctx = { .cur_page = page };
362 struct inode *inode = page->mapping->host;
363 unsigned poff;
364 loff_t ret;
365
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700366 for (poff = 0; poff < PAGE_SIZE; poff += ret) {
367 ret = iomap_apply(inode, page_offset(page) + poff,
368 PAGE_SIZE - poff, 0, ops, &ctx,
369 iomap_readpage_actor);
370 if (ret <= 0) {
371 WARN_ON_ONCE(ret == 0);
372 SetPageError(page);
373 break;
374 }
375 }
376
377 if (ctx.bio) {
378 submit_bio(ctx.bio);
379 WARN_ON_ONCE(!ctx.cur_page_in_bio);
380 } else {
381 WARN_ON_ONCE(ctx.cur_page_in_bio);
382 unlock_page(page);
383 }
384
385 /*
386 * Just like mpage_readpages and block_read_full_page we always
387 * return 0 and just mark the page as PageError on errors. This
388 * should be cleaned up all through the stack eventually.
389 */
390 return 0;
391}
392EXPORT_SYMBOL_GPL(iomap_readpage);
393
394static struct page *
395iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
396 loff_t length, loff_t *done)
397{
398 while (!list_empty(pages)) {
399 struct page *page = lru_to_page(pages);
400
401 if (page_offset(page) >= (u64)pos + length)
402 break;
403
404 list_del(&page->lru);
405 if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
406 GFP_NOFS))
407 return page;
408
409 /*
410 * If we already have a page in the page cache at index we are
411 * done. Upper layers don't care if it is uptodate after the
412 * readpages call itself as every page gets checked again once
413 * actually needed.
414 */
415 *done += PAGE_SIZE;
416 put_page(page);
417 }
418
419 return NULL;
420}
421
422static loff_t
423iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
424 void *data, struct iomap *iomap)
425{
426 struct iomap_readpage_ctx *ctx = data;
427 loff_t done, ret;
428
429 for (done = 0; done < length; done += ret) {
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700430 if (ctx->cur_page && offset_in_page(pos + done) == 0) {
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700431 if (!ctx->cur_page_in_bio)
432 unlock_page(ctx->cur_page);
433 put_page(ctx->cur_page);
434 ctx->cur_page = NULL;
435 }
436 if (!ctx->cur_page) {
437 ctx->cur_page = iomap_next_page(inode, ctx->pages,
438 pos, length, &done);
439 if (!ctx->cur_page)
440 break;
441 ctx->cur_page_in_bio = false;
442 }
443 ret = iomap_readpage_actor(inode, pos + done, length - done,
444 ctx, iomap);
445 }
446
447 return done;
448}
449
450int
451iomap_readpages(struct address_space *mapping, struct list_head *pages,
452 unsigned nr_pages, const struct iomap_ops *ops)
453{
454 struct iomap_readpage_ctx ctx = {
455 .pages = pages,
456 .is_readahead = true,
457 };
458 loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
459 loff_t last = page_offset(list_entry(pages->next, struct page, lru));
460 loff_t length = last - pos + PAGE_SIZE, ret = 0;
461
462 while (length > 0) {
463 ret = iomap_apply(mapping->host, pos, length, 0, ops,
464 &ctx, iomap_readpages_actor);
465 if (ret <= 0) {
466 WARN_ON_ONCE(ret == 0);
467 goto done;
468 }
469 pos += ret;
470 length -= ret;
471 }
472 ret = 0;
473done:
474 if (ctx.bio)
475 submit_bio(ctx.bio);
476 if (ctx.cur_page) {
477 if (!ctx.cur_page_in_bio)
478 unlock_page(ctx.cur_page);
479 put_page(ctx.cur_page);
480 }
481
	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
486 WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
487 return ret;
488}
489EXPORT_SYMBOL_GPL(iomap_readpages);
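
/*
 * Illustrative sketch, not part of this file: a filesystem wires the read
 * helpers into its address_space_operations roughly as follows (the myfs_*
 * names are hypothetical placeholders; XFS does something very similar):
 *
 *	static int myfs_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &myfs_iomap_ops);
 *	}
 *
 *	static int myfs_readpages(struct file *unused,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return iomap_readpages(mapping, pages, nr_pages,
 *				&myfs_iomap_ops);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.readpages	= myfs_readpages,
 *	};
 */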
490
Eric Sandeenc9dcb872018-12-21 08:42:50 -0800491/*
492 * iomap_is_partially_uptodate checks whether blocks within a page are
493 * uptodate or not.
494 *
495 * Returns true if all blocks which correspond to a file portion
496 * we want to read within the page are uptodate.
497 */
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700498int
499iomap_is_partially_uptodate(struct page *page, unsigned long from,
500 unsigned long count)
501{
502 struct iomap_page *iop = to_iomap_page(page);
503 struct inode *inode = page->mapping->host;
Eric Sandeenc9dcb872018-12-21 08:42:50 -0800504 unsigned len, first, last;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700505 unsigned i;
506
Eric Sandeenc9dcb872018-12-21 08:42:50 -0800507 /* Limit range to one page */
508 len = min_t(unsigned, PAGE_SIZE - from, count);
509
510 /* First and last blocks in range within page */
511 first = from >> inode->i_blkbits;
512 last = (from + len - 1) >> inode->i_blkbits;
513
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700514 if (iop) {
515 for (i = first; i <= last; i++)
516 if (!test_bit(i, iop->uptodate))
517 return 0;
518 return 1;
519 }
520
521 return 0;
522}
523EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
524
525int
526iomap_releasepage(struct page *page, gfp_t gfp_mask)
527{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
533 if (PageDirty(page) || PageWriteback(page))
534 return 0;
535 iomap_page_release(page);
536 return 1;
537}
538EXPORT_SYMBOL_GPL(iomap_releasepage);
539
540void
541iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
542{
543 /*
544 * If we are invalidating the entire page, clear the dirty state from it
545 * and release it to avoid unnecessary buildup of the LRU.
546 */
547 if (offset == 0 && len == PAGE_SIZE) {
548 WARN_ON_ONCE(PageWriteback(page));
549 cancel_dirty_page(page);
550 iomap_page_release(page);
551 }
552}
553EXPORT_SYMBOL_GPL(iomap_invalidatepage);
554
555#ifdef CONFIG_MIGRATION
556int
557iomap_migrate_page(struct address_space *mapping, struct page *newpage,
558 struct page *page, enum migrate_mode mode)
559{
560 int ret;
561
562 ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
563 if (ret != MIGRATEPAGE_SUCCESS)
564 return ret;
565
566 if (page_has_private(page)) {
567 ClearPagePrivate(page);
568 set_page_private(newpage, page_private(page));
569 set_page_private(page, 0);
570 SetPagePrivate(newpage);
571 }
572
573 if (mode != MIGRATE_SYNC_NO_COPY)
574 migrate_page_copy(newpage, page);
575 else
576 migrate_page_states(newpage, page);
577 return MIGRATEPAGE_SUCCESS;
578}
579EXPORT_SYMBOL_GPL(iomap_migrate_page);
580#endif /* CONFIG_MIGRATION */
581
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700582static void
Christoph Hellwigae259a92016-06-21 09:23:11 +1000583iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
584{
585 loff_t i_size = i_size_read(inode);
586
	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
591 if (pos + len > i_size)
592 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
593}
594
595static int
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700596iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
597 unsigned poff, unsigned plen, unsigned from, unsigned to,
598 struct iomap *iomap)
599{
600 struct bio_vec bvec;
601 struct bio bio;
602
603 if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
604 zero_user_segments(page, poff, from, to, poff + plen);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700605 iomap_set_range_uptodate(page, poff, plen);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700606 return 0;
607 }
608
609 bio_init(&bio, &bvec, 1);
610 bio.bi_opf = REQ_OP_READ;
611 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
612 bio_set_dev(&bio, iomap->bdev);
613 __bio_add_page(&bio, page, plen, poff);
614 return submit_bio_wait(&bio);
615}
616
617static int
618__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
619 struct page *page, struct iomap *iomap)
620{
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700621 struct iomap_page *iop = iomap_page_create(inode, page);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700622 loff_t block_size = i_blocksize(inode);
623 loff_t block_start = pos & ~(block_size - 1);
624 loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700625 unsigned from = offset_in_page(pos), to = from + len, poff, plen;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700626 int status = 0;
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700627
628 if (PageUptodate(page))
629 return 0;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700630
631 do {
632 iomap_adjust_read_range(inode, iop, &block_start,
633 block_end - block_start, &poff, &plen);
634 if (plen == 0)
635 break;
636
637 if ((from > poff && from < poff + plen) ||
638 (to > poff && to < poff + plen)) {
639 status = iomap_read_page_sync(inode, block_start, page,
640 poff, plen, from, to, iomap);
641 if (status)
642 break;
643 }
644
645 } while ((block_start += plen) < block_end);
646
647 return status;
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700648}
649
650static int
Christoph Hellwigae259a92016-06-21 09:23:11 +1000651iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
652 struct page **pagep, struct iomap *iomap)
653{
654 pgoff_t index = pos >> PAGE_SHIFT;
655 struct page *page;
656 int status = 0;
657
658 BUG_ON(pos + len > iomap->offset + iomap->length);
659
Michal Hockod1908f52017-02-03 13:13:26 -0800660 if (fatal_signal_pending(current))
661 return -EINTR;
662
Christoph Hellwigae259a92016-06-21 09:23:11 +1000663 page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
664 if (!page)
665 return -ENOMEM;
666
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700667 if (iomap->type == IOMAP_INLINE)
668 iomap_read_inline_data(inode, page, iomap);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700669 else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700670 status = __block_write_begin_int(page, pos, len, NULL, iomap);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700671 else
672 status = __iomap_write_begin(inode, pos, len, page, iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000673 if (unlikely(status)) {
674 unlock_page(page);
675 put_page(page);
676 page = NULL;
677
678 iomap_write_failed(inode, pos, len);
679 }
680
681 *pagep = page;
682 return status;
683}
684
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700685int
686iomap_set_page_dirty(struct page *page)
687{
688 struct address_space *mapping = page_mapping(page);
689 int newly_dirty;
690
691 if (unlikely(!mapping))
692 return !TestSetPageDirty(page);
693
694 /*
695 * Lock out page->mem_cgroup migration to keep PageDirty
696 * synchronized with per-memcg dirty page counters.
697 */
698 lock_page_memcg(page);
699 newly_dirty = !TestSetPageDirty(page);
700 if (newly_dirty)
701 __set_page_dirty(page, mapping, 0);
702 unlock_page_memcg(page);
703
704 if (newly_dirty)
705 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
706 return newly_dirty;
707}
708EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
709
710static int
711__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
712 unsigned copied, struct page *page, struct iomap *iomap)
713{
714 flush_dcache_page(page);
715
	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we have encountered a short write and
	 * only partially written into a block, it will not be marked uptodate,
	 * so a readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
727 if (unlikely(copied < len && !PageUptodate(page))) {
728 copied = 0;
729 } else {
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700730 iomap_set_range_uptodate(page, offset_in_page(pos), len);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700731 iomap_set_page_dirty(page);
732 }
733 return __generic_write_end(inode, pos, copied, page);
734}
735
Christoph Hellwigae259a92016-06-21 09:23:11 +1000736static int
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700737iomap_write_end_inline(struct inode *inode, struct page *page,
738 struct iomap *iomap, loff_t pos, unsigned copied)
739{
740 void *addr;
741
742 WARN_ON_ONCE(!PageUptodate(page));
743 BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
744
745 addr = kmap_atomic(page);
746 memcpy(iomap->inline_data + pos, addr + pos, copied);
747 kunmap_atomic(addr);
748
749 mark_inode_dirty(inode);
750 __generic_write_end(inode, pos, copied, page);
751 return copied;
752}
753
Christoph Hellwigae259a92016-06-21 09:23:11 +1000754static int
755iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700756 unsigned copied, struct page *page, struct iomap *iomap)
Christoph Hellwigae259a92016-06-21 09:23:11 +1000757{
758 int ret;
759
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700760 if (iomap->type == IOMAP_INLINE) {
761 ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700762 } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700763 ret = generic_write_end(NULL, inode->i_mapping, pos, len,
764 copied, page, NULL);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700765 } else {
766 ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700767 }
768
Christoph Hellwig63899c62018-06-19 15:10:56 -0700769 if (iomap->page_done)
770 iomap->page_done(inode, pos, copied, page, iomap);
771
Christoph Hellwigae259a92016-06-21 09:23:11 +1000772 if (ret < len)
773 iomap_write_failed(inode, pos, len);
774 return ret;
775}
776
777static loff_t
778iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
779 struct iomap *iomap)
780{
781 struct iov_iter *i = data;
782 long status = 0;
783 ssize_t written = 0;
784 unsigned int flags = AOP_FLAG_NOFS;
785
Christoph Hellwigae259a92016-06-21 09:23:11 +1000786 do {
787 struct page *page;
788 unsigned long offset; /* Offset into pagecache page */
789 unsigned long bytes; /* Bytes to write to page */
790 size_t copied; /* Bytes copied from user */
791
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700792 offset = offset_in_page(pos);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000793 bytes = min_t(unsigned long, PAGE_SIZE - offset,
794 iov_iter_count(i));
795again:
796 if (bytes > length)
797 bytes = length;
798
799 /*
800 * Bring in the user page that we will copy from _first_.
801 * Otherwise there's a nasty deadlock on copying from the
802 * same page as we're writing to, without it being marked
803 * up-to-date.
804 *
805 * Not only is this an optimisation, but it is also required
806 * to check that the address is actually valid, when atomic
807 * usercopies are used, below.
808 */
809 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
810 status = -EFAULT;
811 break;
812 }
813
814 status = iomap_write_begin(inode, pos, bytes, flags, &page,
815 iomap);
816 if (unlikely(status))
817 break;
818
819 if (mapping_writably_mapped(inode->i_mapping))
820 flush_dcache_page(page);
821
Christoph Hellwigae259a92016-06-21 09:23:11 +1000822 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000823
824 flush_dcache_page(page);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000825
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700826 status = iomap_write_end(inode, pos, bytes, copied, page,
827 iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000828 if (unlikely(status < 0))
829 break;
830 copied = status;
831
832 cond_resched();
833
834 iov_iter_advance(i, copied);
835 if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
844 bytes = min_t(unsigned long, PAGE_SIZE - offset,
845 iov_iter_single_seg_count(i));
846 goto again;
847 }
848 pos += copied;
849 written += copied;
850 length -= copied;
851
852 balance_dirty_pages_ratelimited(inode->i_mapping);
853 } while (iov_iter_count(i) && length);
854
855 return written ? written : status;
856}
857
858ssize_t
859iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -0800860 const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +1000861{
862 struct inode *inode = iocb->ki_filp->f_mapping->host;
863 loff_t pos = iocb->ki_pos, ret = 0, written = 0;
864
865 while (iov_iter_count(iter)) {
866 ret = iomap_apply(inode, pos, iov_iter_count(iter),
867 IOMAP_WRITE, ops, iter, iomap_write_actor);
868 if (ret <= 0)
869 break;
870 pos += ret;
871 written += ret;
872 }
873
874 return written ? written : ret;
875}
876EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
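
/*
 * Illustrative sketch, not part of this file: a filesystem's ->write_iter
 * typically takes the inode lock, performs the generic checks and then calls
 * iomap_file_buffered_write().  The myfs_* names are hypothetical; error
 * handling and the O_DIRECT path are omitted for brevity:
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */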
877
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000878static struct page *
879__iomap_read_page(struct inode *inode, loff_t offset)
880{
881 struct address_space *mapping = inode->i_mapping;
882 struct page *page;
883
884 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
885 if (IS_ERR(page))
886 return page;
887 if (!PageUptodate(page)) {
888 put_page(page);
889 return ERR_PTR(-EIO);
890 }
891 return page;
892}
893
894static loff_t
895iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
896 struct iomap *iomap)
897{
898 long status = 0;
899 ssize_t written = 0;
900
901 do {
902 struct page *page, *rpage;
903 unsigned long offset; /* Offset into pagecache page */
904 unsigned long bytes; /* Bytes to write to page */
905
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700906 offset = offset_in_page(pos);
Christoph Hellwige28ae8e2017-08-11 12:45:35 -0700907 bytes = min_t(loff_t, PAGE_SIZE - offset, length);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000908
909 rpage = __iomap_read_page(inode, pos);
910 if (IS_ERR(rpage))
911 return PTR_ERR(rpage);
912
913 status = iomap_write_begin(inode, pos, bytes,
Tetsuo Handac718a972017-05-08 15:58:59 -0700914 AOP_FLAG_NOFS, &page, iomap);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000915 put_page(rpage);
916 if (unlikely(status))
917 return status;
918
919 WARN_ON_ONCE(!PageUptodate(page));
920
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700921 status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000922 if (unlikely(status <= 0)) {
923 if (WARN_ON_ONCE(status == 0))
924 return -EIO;
925 return status;
926 }
927
928 cond_resched();
929
930 pos += status;
931 written += status;
932 length -= status;
933
934 balance_dirty_pages_ratelimited(inode->i_mapping);
935 } while (length);
936
937 return written;
938}
939
940int
941iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -0800942 const struct iomap_ops *ops)
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000943{
944 loff_t ret;
945
946 while (len) {
947 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
948 iomap_dirty_actor);
949 if (ret <= 0)
950 return ret;
951 pos += ret;
952 len -= ret;
953 }
954
955 return 0;
956}
957EXPORT_SYMBOL_GPL(iomap_file_dirty);
958
Christoph Hellwigae259a92016-06-21 09:23:11 +1000959static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
960 unsigned bytes, struct iomap *iomap)
961{
962 struct page *page;
963 int status;
964
Tetsuo Handac718a972017-05-08 15:58:59 -0700965 status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
966 iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000967 if (status)
968 return status;
969
970 zero_user(page, offset, bytes);
971 mark_page_accessed(page);
972
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700973 return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000974}
975
Christoph Hellwig9a286f02016-06-21 09:31:39 +1000976static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
977 struct iomap *iomap)
978{
Christoph Hellwig57fc5052018-06-01 09:03:08 -0700979 return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
980 iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
Christoph Hellwig9a286f02016-06-21 09:31:39 +1000981}
982
Christoph Hellwigae259a92016-06-21 09:23:11 +1000983static loff_t
984iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
985 void *data, struct iomap *iomap)
986{
987 bool *did_zero = data;
988 loff_t written = 0;
989 int status;
990
991 /* already zeroed? we're done. */
992 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
993 return count;
994
995 do {
996 unsigned offset, bytes;
997
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700998 offset = offset_in_page(pos);
Christoph Hellwige28ae8e2017-08-11 12:45:35 -0700999 bytes = min_t(loff_t, PAGE_SIZE - offset, count);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001000
Christoph Hellwig9a286f02016-06-21 09:31:39 +10001001 if (IS_DAX(inode))
1002 status = iomap_dax_zero(pos, offset, bytes, iomap);
1003 else
1004 status = iomap_zero(inode, pos, offset, bytes, iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001005 if (status < 0)
1006 return status;
1007
1008 pos += bytes;
1009 count -= bytes;
1010 written += bytes;
1011 if (did_zero)
1012 *did_zero = true;
1013 } while (count > 0);
1014
1015 return written;
1016}
1017
1018int
1019iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001020 const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +10001021{
1022 loff_t ret;
1023
1024 while (len > 0) {
1025 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
1026 ops, did_zero, iomap_zero_range_actor);
1027 if (ret <= 0)
1028 return ret;
1029
1030 pos += ret;
1031 len -= ret;
1032 }
1033
1034 return 0;
1035}
1036EXPORT_SYMBOL_GPL(iomap_zero_range);
1037
1038int
1039iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001040 const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +10001041{
Fabian Frederick93407472017-02-27 14:28:32 -08001042 unsigned int blocksize = i_blocksize(inode);
1043 unsigned int off = pos & (blocksize - 1);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001044
1045 /* Block boundary? Nothing to do */
1046 if (!off)
1047 return 0;
1048 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1049}
1050EXPORT_SYMBOL_GPL(iomap_truncate_page);
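
/*
 * Illustrative sketch, not part of this file: a truncate/setattr path can use
 * iomap_truncate_page() to zero the partial block at the new EOF before the
 * inode size is changed (hypothetical myfs_iomap_ops, locking omitted):
 *
 *	bool did_zero = false;
 *	int error;
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&myfs_iomap_ops);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */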
1051
1052static loff_t
1053iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
1054 void *data, struct iomap *iomap)
1055{
1056 struct page *page = data;
1057 int ret;
1058
Christoph Hellwigc03cea42018-06-19 15:10:58 -07001059 if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
1060 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
1061 if (ret)
1062 return ret;
1063 block_commit_write(page, 0, length);
1064 } else {
1065 WARN_ON_ONCE(!PageUptodate(page));
Christoph Hellwig9dc55f12018-07-11 22:26:05 -07001066 iomap_page_create(inode, page);
Brian Foster561295a2018-09-29 13:51:01 +10001067 set_page_dirty(page);
Christoph Hellwigc03cea42018-06-19 15:10:58 -07001068 }
Christoph Hellwigae259a92016-06-21 09:23:11 +10001069
Christoph Hellwigae259a92016-06-21 09:23:11 +10001070 return length;
1071}
1072
Dave Jiang11bac802017-02-24 14:56:41 -08001073int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +10001074{
1075 struct page *page = vmf->page;
Dave Jiang11bac802017-02-24 14:56:41 -08001076 struct inode *inode = file_inode(vmf->vma->vm_file);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001077 unsigned long length;
1078 loff_t offset, size;
1079 ssize_t ret;
1080
1081 lock_page(page);
1082 size = i_size_read(inode);
1083 if ((page->mapping != inode->i_mapping) ||
1084 (page_offset(page) > size)) {
1085 /* We overload EFAULT to mean page got truncated */
1086 ret = -EFAULT;
1087 goto out_unlock;
1088 }
1089
1090 /* page is wholly or partially inside EOF */
1091 if (((page->index + 1) << PAGE_SHIFT) > size)
Andreas Gruenbacher10259de2018-08-10 11:46:14 -07001092 length = offset_in_page(size);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001093 else
1094 length = PAGE_SIZE;
1095
1096 offset = page_offset(page);
1097 while (length > 0) {
Jan Kara9484ab12016-11-10 10:26:50 +11001098 ret = iomap_apply(inode, offset, length,
1099 IOMAP_WRITE | IOMAP_FAULT, ops, page,
1100 iomap_page_mkwrite_actor);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001101 if (unlikely(ret <= 0))
1102 goto out_unlock;
1103 offset += ret;
1104 length -= ret;
1105 }
1106
Christoph Hellwigae259a92016-06-21 09:23:11 +10001107 wait_for_stable_page(page);
Christoph Hellwige7647fb2017-08-29 10:08:41 -07001108 return VM_FAULT_LOCKED;
Christoph Hellwigae259a92016-06-21 09:23:11 +10001109out_unlock:
1110 unlock_page(page);
Christoph Hellwige7647fb2017-08-29 10:08:41 -07001111 return block_page_mkwrite_return(ret);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001112}
1113EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
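
/*
 * Illustrative sketch, not part of this file: a filesystem exposes this
 * handler through its vm_operations_struct, usually wrapped so it can
 * annotate the fault and take superblock write protection (the myfs_* names
 * are hypothetical placeholders):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 *
 *	static const struct vm_operations_struct myfs_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */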
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001114
1115struct fiemap_ctx {
1116 struct fiemap_extent_info *fi;
1117 struct iomap prev;
1118};
1119
1120static int iomap_to_fiemap(struct fiemap_extent_info *fi,
1121 struct iomap *iomap, u32 flags)
1122{
1123 switch (iomap->type) {
1124 case IOMAP_HOLE:
1125 /* skip holes */
1126 return 0;
1127 case IOMAP_DELALLOC:
1128 flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
1129 break;
Christoph Hellwig19319b52018-06-01 09:03:06 -07001130 case IOMAP_MAPPED:
1131 break;
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001132 case IOMAP_UNWRITTEN:
1133 flags |= FIEMAP_EXTENT_UNWRITTEN;
1134 break;
Christoph Hellwig19319b52018-06-01 09:03:06 -07001135 case IOMAP_INLINE:
1136 flags |= FIEMAP_EXTENT_DATA_INLINE;
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001137 break;
1138 }
1139
Christoph Hellwig17de0a92016-08-29 11:33:58 +10001140 if (iomap->flags & IOMAP_F_MERGED)
1141 flags |= FIEMAP_EXTENT_MERGED;
Darrick J. Wonge43c4602016-09-19 10:13:02 +10001142 if (iomap->flags & IOMAP_F_SHARED)
1143 flags |= FIEMAP_EXTENT_SHARED;
Christoph Hellwig17de0a92016-08-29 11:33:58 +10001144
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001145 return fiemap_fill_next_extent(fi, iomap->offset,
Andreas Gruenbacher19fe5f62017-10-01 17:55:54 -04001146 iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
Christoph Hellwig17de0a92016-08-29 11:33:58 +10001147 iomap->length, flags);
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001148}
1149
1150static loff_t
1151iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1152 struct iomap *iomap)
1153{
1154 struct fiemap_ctx *ctx = data;
1155 loff_t ret = length;
1156
1157 if (iomap->type == IOMAP_HOLE)
1158 return length;
1159
1160 ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
1161 ctx->prev = *iomap;
1162 switch (ret) {
1163 case 0: /* success */
1164 return length;
1165 case 1: /* extent array full */
1166 return 0;
1167 default:
1168 return ret;
1169 }
1170}
1171
1172int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001173 loff_t start, loff_t len, const struct iomap_ops *ops)
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001174{
1175 struct fiemap_ctx ctx;
1176 loff_t ret;
1177
1178 memset(&ctx, 0, sizeof(ctx));
1179 ctx.fi = fi;
1180 ctx.prev.type = IOMAP_HOLE;
1181
1182 ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
1183 if (ret)
1184 return ret;
1185
Dave Chinner8896b8f2016-08-17 08:41:10 +10001186 if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
1187 ret = filemap_write_and_wait(inode->i_mapping);
1188 if (ret)
1189 return ret;
1190 }
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001191
1192 while (len > 0) {
Christoph Hellwigd33fd772016-10-20 15:51:28 +11001193 ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001194 iomap_fiemap_actor);
Dave Chinnerac2dc052016-08-17 08:41:34 +10001195 /* inode with no (attribute) mapping will give ENOENT */
1196 if (ret == -ENOENT)
1197 break;
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001198 if (ret < 0)
1199 return ret;
1200 if (ret == 0)
1201 break;
1202
1203 start += ret;
1204 len -= ret;
1205 }
1206
1207 if (ctx.prev.type != IOMAP_HOLE) {
1208 ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
1209 if (ret < 0)
1210 return ret;
1211 }
1212
1213 return 0;
1214}
1215EXPORT_SYMBOL_GPL(iomap_fiemap);
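
/*
 * Illustrative sketch, not part of this file: an inode_operations ->fiemap
 * handler can forward straight to iomap_fiemap() (hypothetical myfs_* names):
 *
 *	static int myfs_fiemap(struct inode *inode,
 *			struct fiemap_extent_info *fieinfo, u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fieinfo, start, len,
 *				&myfs_iomap_ops);
 *	}
 */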
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001216
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001217/*
1218 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001219 * Returns true if found and updates @lastoff to the offset in file.
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001220 */
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001221static bool
1222page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
1223 int whence)
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001224{
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001225 const struct address_space_operations *ops = inode->i_mapping->a_ops;
1226 unsigned int bsize = i_blocksize(inode), off;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001227 bool seek_data = whence == SEEK_DATA;
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001228 loff_t poff = page_offset(page);
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001229
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001230 if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
1231 return false;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001232
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001233 if (*lastoff < poff) {
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001234 /*
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001235 * Last offset smaller than the start of the page means we found
1236 * a hole:
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001237 */
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001238 if (whence == SEEK_HOLE)
1239 return true;
1240 *lastoff = poff;
1241 }
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001242
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001243 /*
1244 * Just check the page unless we can and should check block ranges:
1245 */
1246 if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
1247 return PageUptodate(page) == seek_data;
1248
1249 lock_page(page);
1250 if (unlikely(page->mapping != inode->i_mapping))
1251 goto out_unlock_not_found;
1252
1253 for (off = 0; off < PAGE_SIZE; off += bsize) {
Andreas Gruenbacher10259de2018-08-10 11:46:14 -07001254 if (offset_in_page(*lastoff) >= off + bsize)
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001255 continue;
1256 if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
1257 unlock_page(page);
1258 return true;
1259 }
1260 *lastoff = poff + off + bsize;
1261 }
1262
1263out_unlock_not_found:
1264 unlock_page(page);
1265 return false;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001266}
1267
1268/*
1269 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
1270 *
1271 * Within unwritten extents, the page cache determines which parts are holes
Christoph Hellwigbd56b3e2018-06-01 09:05:14 -07001272 * and which are data: uptodate buffer heads count as data; everything else
1273 * counts as a hole.
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001274 *
1275 * Returns the resulting offset on successs, and -ENOENT otherwise.
1276 */
1277static loff_t
1278page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
1279 int whence)
1280{
1281 pgoff_t index = offset >> PAGE_SHIFT;
1282 pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1283 loff_t lastoff = offset;
1284 struct pagevec pvec;
1285
1286 if (length <= 0)
1287 return -ENOENT;
1288
1289 pagevec_init(&pvec);
1290
1291 do {
1292 unsigned nr_pages, i;
1293
1294 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
1295 end - 1);
1296 if (nr_pages == 0)
1297 break;
1298
1299 for (i = 0; i < nr_pages; i++) {
1300 struct page *page = pvec.pages[i];
1301
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001302 if (page_seek_hole_data(inode, page, &lastoff, whence))
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001303 goto check_range;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001304 lastoff = page_offset(page) + PAGE_SIZE;
1305 }
1306 pagevec_release(&pvec);
1307 } while (index < end);
1308
	/* If there is no page at lastoff and we are not done, we found a hole. */
1310 if (whence != SEEK_HOLE)
1311 goto not_found;
1312
1313check_range:
1314 if (lastoff < offset + length)
1315 goto out;
1316not_found:
1317 lastoff = -ENOENT;
1318out:
1319 pagevec_release(&pvec);
1320 return lastoff;
1321}
1322
1323
Andreas Gruenbacher0ed3b0d2017-06-29 11:43:21 -07001324static loff_t
1325iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
1326 void *data, struct iomap *iomap)
1327{
1328 switch (iomap->type) {
1329 case IOMAP_UNWRITTEN:
1330 offset = page_cache_seek_hole_data(inode, offset, length,
1331 SEEK_HOLE);
1332 if (offset < 0)
1333 return length;
1334 /* fall through */
1335 case IOMAP_HOLE:
1336 *(loff_t *)data = offset;
1337 return 0;
1338 default:
1339 return length;
1340 }
1341}
1342
1343loff_t
1344iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1345{
1346 loff_t size = i_size_read(inode);
1347 loff_t length = size - offset;
1348 loff_t ret;
1349
Darrick J. Wongd6ab17f2017-07-12 10:26:47 -07001350 /* Nothing to be found before or beyond the end of the file. */
1351 if (offset < 0 || offset >= size)
Andreas Gruenbacher0ed3b0d2017-06-29 11:43:21 -07001352 return -ENXIO;
1353
1354 while (length > 0) {
1355 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1356 &offset, iomap_seek_hole_actor);
1357 if (ret < 0)
1358 return ret;
1359 if (ret == 0)
1360 break;
1361
1362 offset += ret;
1363 length -= ret;
1364 }
1365
1366 return offset;
1367}
1368EXPORT_SYMBOL_GPL(iomap_seek_hole);
1369
1370static loff_t
1371iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
1372 void *data, struct iomap *iomap)
1373{
1374 switch (iomap->type) {
1375 case IOMAP_HOLE:
1376 return length;
1377 case IOMAP_UNWRITTEN:
1378 offset = page_cache_seek_hole_data(inode, offset, length,
1379 SEEK_DATA);
1380 if (offset < 0)
1381 return length;
1382 /*FALLTHRU*/
1383 default:
1384 *(loff_t *)data = offset;
1385 return 0;
1386 }
1387}
1388
1389loff_t
1390iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1391{
1392 loff_t size = i_size_read(inode);
1393 loff_t length = size - offset;
1394 loff_t ret;
1395
Darrick J. Wongd6ab17f2017-07-12 10:26:47 -07001396 /* Nothing to be found before or beyond the end of the file. */
1397 if (offset < 0 || offset >= size)
Andreas Gruenbacher0ed3b0d2017-06-29 11:43:21 -07001398 return -ENXIO;
1399
1400 while (length > 0) {
1401 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1402 &offset, iomap_seek_data_actor);
1403 if (ret < 0)
1404 return ret;
1405 if (ret == 0)
1406 break;
1407
1408 offset += ret;
1409 length -= ret;
1410 }
1411
1412 if (length <= 0)
1413 return -ENXIO;
1414 return offset;
1415}
1416EXPORT_SYMBOL_GPL(iomap_seek_data);
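
/*
 * Illustrative sketch, not part of this file: an ->llseek implementation can
 * use the helpers above for SEEK_HOLE/SEEK_DATA and fall back to
 * generic_file_llseek() for the remaining whence values (hypothetical myfs_*
 * names, file-size clamping kept minimal):
 *
 *	static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file_inode(file);
 *
 *		switch (whence) {
 *		case SEEK_HOLE:
 *			offset = iomap_seek_hole(inode, offset, &myfs_iomap_ops);
 *			break;
 *		case SEEK_DATA:
 *			offset = iomap_seek_data(inode, offset, &myfs_iomap_ops);
 *			break;
 *		default:
 *			return generic_file_llseek(file, offset, whence);
 *		}
 *
 *		if (offset < 0)
 *			return offset;
 *		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *	}
 */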
1417
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001418/*
1419 * Private flags for iomap_dio, must not overlap with the public ones in
1420 * iomap.h:
1421 */
Dave Chinner3460cac2018-05-02 12:54:53 -07001422#define IOMAP_DIO_WRITE_FUA (1 << 28)
Dave Chinner4f8ff442018-05-02 12:54:52 -07001423#define IOMAP_DIO_NEED_SYNC (1 << 29)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001424#define IOMAP_DIO_WRITE (1 << 30)
1425#define IOMAP_DIO_DIRTY (1 << 31)
1426
1427struct iomap_dio {
1428 struct kiocb *iocb;
1429 iomap_dio_end_io_t *end_io;
1430 loff_t i_size;
1431 loff_t size;
1432 atomic_t ref;
1433 unsigned flags;
1434 int error;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001435 bool wait_for_completion;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001436
1437 union {
1438 /* used during submission and for synchronous completion: */
1439 struct {
1440 struct iov_iter *iter;
1441 struct task_struct *waiter;
1442 struct request_queue *last_queue;
1443 blk_qc_t cookie;
1444 } submit;
1445
1446 /* used for aio completion: */
1447 struct {
1448 struct work_struct work;
1449 } aio;
1450 };
1451};
1452
1453static ssize_t iomap_dio_complete(struct iomap_dio *dio)
1454{
1455 struct kiocb *iocb = dio->iocb;
Lukas Czerner332391a2017-09-21 08:16:29 -06001456 struct inode *inode = file_inode(iocb->ki_filp);
Eryu Guan5e25c262017-10-13 09:47:46 -07001457 loff_t offset = iocb->ki_pos;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001458 ssize_t ret;
1459
1460 if (dio->end_io) {
1461 ret = dio->end_io(iocb,
1462 dio->error ? dio->error : dio->size,
1463 dio->flags);
1464 } else {
1465 ret = dio->error;
1466 }
1467
1468 if (likely(!ret)) {
1469 ret = dio->size;
1470 /* check for short read */
Eryu Guan5e25c262017-10-13 09:47:46 -07001471 if (offset + ret > dio->i_size &&
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001472 !(dio->flags & IOMAP_DIO_WRITE))
Eryu Guan5e25c262017-10-13 09:47:46 -07001473 ret = dio->i_size - offset;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001474 iocb->ki_pos += ret;
1475 }
1476
Eryu Guan5e25c262017-10-13 09:47:46 -07001477 /*
1478 * Try again to invalidate clean pages which might have been cached by
1479 * non-direct readahead, or faulted in by get_user_pages() if the source
1480 * of the write was an mmap'ed region of the file we're writing. Either
1481 * one is a pretty crazy thing to do, so we don't support it 100%. If
1482 * this invalidation fails, tough, the write still worked...
1483 *
1484 * And this page cache invalidation has to be after dio->end_io(), as
1485 * some filesystems convert unwritten extents to real allocations in
1486 * end_io() when necessary, otherwise a racing buffer read would cache
1487 * zeros from unwritten extents.
1488 */
1489 if (!dio->error &&
1490 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
1491 int err;
1492 err = invalidate_inode_pages2_range(inode->i_mapping,
1493 offset >> PAGE_SHIFT,
1494 (offset + dio->size - 1) >> PAGE_SHIFT);
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001495 if (err)
1496 dio_warn_stale_pagecache(iocb->ki_filp);
Eryu Guan5e25c262017-10-13 09:47:46 -07001497 }
1498
Dave Chinner4f8ff442018-05-02 12:54:52 -07001499 /*
1500 * If this is a DSYNC write, make sure we push it to stable storage now
1501 * that we've written data.
1502 */
1503 if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
1504 ret = generic_write_sync(iocb, ret);
1505
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001506 inode_dio_end(file_inode(iocb->ki_filp));
1507 kfree(dio);
1508
1509 return ret;
1510}
1511
1512static void iomap_dio_complete_work(struct work_struct *work)
1513{
1514 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
1515 struct kiocb *iocb = dio->iocb;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001516
Dave Chinner4f8ff442018-05-02 12:54:52 -07001517 iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001518}
1519
1520/*
1521 * Set an error in the dio if none is set yet. We have to use cmpxchg
1522 * as the submission context and the completion context(s) can race to
1523 * update the error.
1524 */
1525static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
1526{
1527 cmpxchg(&dio->error, 0, ret);
1528}
1529
1530static void iomap_dio_bio_end_io(struct bio *bio)
1531{
1532 struct iomap_dio *dio = bio->bi_private;
1533 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
1534
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001535 if (bio->bi_status)
1536 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001537
1538 if (atomic_dec_and_test(&dio->ref)) {
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001539 if (dio->wait_for_completion) {
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001540 struct task_struct *waiter = dio->submit.waiter;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001541 WRITE_ONCE(dio->submit.waiter, NULL);
1542 wake_up_process(waiter);
1543 } else if (dio->flags & IOMAP_DIO_WRITE) {
1544 struct inode *inode = file_inode(dio->iocb->ki_filp);
1545
1546 INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
1547 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
1548 } else {
1549 iomap_dio_complete_work(&dio->aio.work);
1550 }
1551 }
1552
1553 if (should_dirty) {
1554 bio_check_pages_dirty(bio);
1555 } else {
1556 struct bio_vec *bvec;
1557 int i;
1558
1559 bio_for_each_segment_all(bvec, bio, i)
1560 put_page(bvec->bv_page);
1561 bio_put(bio);
1562 }
1563}
1564
1565static blk_qc_t
1566iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
1567 unsigned len)
1568{
1569 struct page *page = ZERO_PAGE(0);
1570 struct bio *bio;
1571
1572 bio = bio_alloc(GFP_KERNEL, 1);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001573 bio_set_dev(bio, iomap->bdev);
Christoph Hellwig57fc5052018-06-01 09:03:08 -07001574 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001575 bio->bi_private = dio;
1576 bio->bi_end_io = iomap_dio_bio_end_io;
1577
1578 get_page(page);
Christoph Hellwig6533b4e2018-06-01 09:03:07 -07001579 __bio_add_page(bio, page, len, 0);
Linus Torvalds5cc60ae2016-12-14 21:35:31 -08001580 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001581
1582 atomic_inc(&dio->ref);
1583 return submit_bio(bio);
1584}
1585
1586static loff_t
Christoph Hellwig09230432018-07-03 09:07:46 -07001587iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1588 struct iomap_dio *dio, struct iomap *iomap)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001589{
Fabian Frederick93407472017-02-27 14:28:32 -08001590 unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
1591 unsigned int fs_block_size = i_blocksize(inode), pad;
1592 unsigned int align = iov_iter_alignment(dio->submit.iter);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001593 struct iov_iter iter;
1594 struct bio *bio;
1595 bool need_zeroout = false;
Dave Chinner3460cac2018-05-02 12:54:53 -07001596 bool use_fua = false;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001597 int nr_pages, ret;
Al Virocfe057f2017-09-11 21:17:09 +01001598 size_t copied = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001599
1600 if ((pos | length | align) & ((1 << blkbits) - 1))
1601 return -EINVAL;
1602
Christoph Hellwig09230432018-07-03 09:07:46 -07001603 if (iomap->type == IOMAP_UNWRITTEN) {
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001604 dio->flags |= IOMAP_DIO_UNWRITTEN;
1605 need_zeroout = true;
Christoph Hellwig09230432018-07-03 09:07:46 -07001606 }
1607
1608 if (iomap->flags & IOMAP_F_SHARED)
1609 dio->flags |= IOMAP_DIO_COW;
1610
1611 if (iomap->flags & IOMAP_F_NEW) {
1612 need_zeroout = true;
1613 } else {
		/*
		 * Use a FUA write if we need datasync semantics: this is a
		 * pure data IO that doesn't require any metadata updates and
		 * the underlying device supports FUA.  This allows us to
		 * avoid cache flushes on IO completion.
		 */
1620 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
1621 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
1622 blk_queue_fua(bdev_get_queue(iomap->bdev)))
1623 use_fua = true;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001624 }
1625
1626 /*
1627 * Operate on a partial iter trimmed to the extent we were called for.
1628 * We'll update the iter in the dio once we're done with this extent.
1629 */
1630 iter = *dio->submit.iter;
1631 iov_iter_truncate(&iter, length);
1632
1633 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1634 if (nr_pages <= 0)
1635 return nr_pages;
1636
1637 if (need_zeroout) {
1638 /* zero out from the start of the block to the write offset */
1639 pad = pos & (fs_block_size - 1);
1640 if (pad)
1641 iomap_dio_zero(dio, iomap, pos - pad, pad);
1642 }
1643
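	/*
	 * Build and submit one bio at a time, each covering at most
	 * BIO_MAX_PAGES pages of the trimmed iter. Every submitted bio holds
	 * a reference on the dio; the matching puts happen in
	 * iomap_dio_bio_end_io().
	 */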
1644 do {
Al Virocfe057f2017-09-11 21:17:09 +01001645 size_t n;
1646 if (dio->error) {
1647 iov_iter_revert(dio->submit.iter, copied);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001648 return 0;
Al Virocfe057f2017-09-11 21:17:09 +01001649 }
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001650
1651 bio = bio_alloc(GFP_KERNEL, nr_pages);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001652 bio_set_dev(bio, iomap->bdev);
Christoph Hellwig57fc5052018-06-01 09:03:08 -07001653 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
Jens Axboe45d06cf2017-06-27 11:01:22 -06001654 bio->bi_write_hint = dio->iocb->ki_hint;
Adam Manzanares087e5662018-05-22 10:52:21 -07001655 bio->bi_ioprio = dio->iocb->ki_ioprio;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001656 bio->bi_private = dio;
1657 bio->bi_end_io = iomap_dio_bio_end_io;
1658
1659 ret = bio_iov_iter_get_pages(bio, &iter);
1660 if (unlikely(ret)) {
1661 bio_put(bio);
Al Virocfe057f2017-09-11 21:17:09 +01001662 return copied ? copied : ret;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001663 }
1664
Al Virocfe057f2017-09-11 21:17:09 +01001665 n = bio->bi_iter.bi_size;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001666 if (dio->flags & IOMAP_DIO_WRITE) {
Dave Chinner3460cac2018-05-02 12:54:53 -07001667 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
1668 if (use_fua)
1669 bio->bi_opf |= REQ_FUA;
1670 else
1671 dio->flags &= ~IOMAP_DIO_WRITE_FUA;
Al Virocfe057f2017-09-11 21:17:09 +01001672 task_io_account_write(n);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001673 } else {
Dave Chinner3460cac2018-05-02 12:54:53 -07001674 bio->bi_opf = REQ_OP_READ;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001675 if (dio->flags & IOMAP_DIO_DIRTY)
1676 bio_set_pages_dirty(bio);
1677 }
1678
Al Virocfe057f2017-09-11 21:17:09 +01001679 iov_iter_advance(dio->submit.iter, n);
1680
1681 dio->size += n;
1682 pos += n;
1683 copied += n;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001684
1685 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1686
1687 atomic_inc(&dio->ref);
1688
1689 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
1690 dio->submit.cookie = submit_bio(bio);
1691 } while (nr_pages);
1692
1693 if (need_zeroout) {
1694 /* zero out from the end of the write to the end of the block */
1695 pad = pos & (fs_block_size - 1);
1696 if (pad)
1697 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
1698 }
Al Virocfe057f2017-09-11 21:17:09 +01001699 return copied;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001700}
1701
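/*
 * Direct reads from a hole have no backing blocks to read, so simply
 * zero-fill the user buffer for the length of the hole.
 */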
Christoph Hellwig09230432018-07-03 09:07:46 -07001702static loff_t
1703iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
1704{
1705 length = iov_iter_zero(length, dio->submit.iter);
1706 dio->size += length;
1707 return length;
1708}
1709
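/*
 * Inline (in-inode) data is served straight from the iomap's inline_data
 * buffer: reads copy out of it, writes copy into it, zero-filling any gap
 * between i_size and the write offset and extending i_size as needed.
 */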
1710static loff_t
Andreas Gruenbacherec181f62018-07-03 09:07:47 -07001711iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
1712 struct iomap_dio *dio, struct iomap *iomap)
1713{
1714 struct iov_iter *iter = dio->submit.iter;
1715 size_t copied;
1716
1717 BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
1718
1719 if (dio->flags & IOMAP_DIO_WRITE) {
1720 loff_t size = inode->i_size;
1721
1722 if (pos > size)
1723 memset(iomap->inline_data + size, 0, pos - size);
1724 copied = copy_from_iter(iomap->inline_data + pos, length, iter);
1725 if (copied) {
1726 if (pos + copied > size)
1727 i_size_write(inode, pos + copied);
1728 mark_inode_dirty(inode);
1729 }
1730 } else {
1731 copied = copy_to_iter(iomap->inline_data + pos, length, iter);
1732 }
1733 dio->size += copied;
1734 return copied;
1735}
1736
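/*
 * Dispatch one mapped extent to the appropriate actor based on the extent
 * type reported by ->iomap_begin. Direct writes must never see a hole
 * here; the filesystem is expected to have allocated or reserved blocks
 * for the range up front.
 */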
1737static loff_t
Christoph Hellwig09230432018-07-03 09:07:46 -07001738iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
1739 void *data, struct iomap *iomap)
1740{
1741 struct iomap_dio *dio = data;
1742
1743 switch (iomap->type) {
1744 case IOMAP_HOLE:
1745 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
1746 return -EIO;
1747 return iomap_dio_hole_actor(length, dio);
1748 case IOMAP_UNWRITTEN:
1749 if (!(dio->flags & IOMAP_DIO_WRITE))
1750 return iomap_dio_hole_actor(length, dio);
1751 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
1752 case IOMAP_MAPPED:
1753 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
Andreas Gruenbacherec181f62018-07-03 09:07:47 -07001754 case IOMAP_INLINE:
1755 return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
Christoph Hellwig09230432018-07-03 09:07:46 -07001756 default:
1757 WARN_ON_ONCE(1);
1758 return -EIO;
1759 }
1760}
1761
Dave Chinner4f8ff442018-05-02 12:54:52 -07001762/*
1763 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
Dave Chinner3460cac2018-05-02 12:54:53 -07001764 * is being issued as AIO or not. This allows us to optimise pure data writes
1765 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
1766 * REQ_FLUSH post write. This is slightly tricky because a single request here
1767 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
1768 * may be pure data writes. In that case, we still need to do a full data sync
1769 * completion.
Dave Chinner4f8ff442018-05-02 12:54:52 -07001770 */
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001771ssize_t
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001772iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1773 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001774{
1775 struct address_space *mapping = iocb->ki_filp->f_mapping;
1776 struct inode *inode = file_inode(iocb->ki_filp);
1777 size_t count = iov_iter_count(iter);
Eryu Guanc771c142017-03-02 15:02:06 -08001778 loff_t pos = iocb->ki_pos, start = pos;
1779 loff_t end = iocb->ki_pos + count - 1, ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001780 unsigned int flags = IOMAP_DIRECT;
1781 struct blk_plug plug;
1782 struct iomap_dio *dio;
1783
1784 lockdep_assert_held(&inode->i_rwsem);
1785
1786 if (!count)
1787 return 0;
1788
1789 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1790 if (!dio)
1791 return -ENOMEM;
1792
1793 dio->iocb = iocb;
1794 atomic_set(&dio->ref, 1);
1795 dio->size = 0;
1796 dio->i_size = i_size_read(inode);
1797 dio->end_io = end_io;
1798 dio->error = 0;
1799 dio->flags = 0;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001800 dio->wait_for_completion = is_sync_kiocb(iocb);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001801
1802 dio->submit.iter = iter;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001803 dio->submit.waiter = current;
1804 dio->submit.cookie = BLK_QC_T_NONE;
1805 dio->submit.last_queue = NULL;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001806
1807 if (iov_iter_rw(iter) == READ) {
1808 if (pos >= dio->i_size)
1809 goto out_free_dio;
1810
1811 if (iter->type == ITER_IOVEC)
1812 dio->flags |= IOMAP_DIO_DIRTY;
1813 } else {
Dave Chinner3460cac2018-05-02 12:54:53 -07001814 flags |= IOMAP_WRITE;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001815 dio->flags |= IOMAP_DIO_WRITE;
Dave Chinner3460cac2018-05-02 12:54:53 -07001816
	1817 		/* O_DSYNC and O_SYNC writes need sync completion processing */
Dave Chinner4f8ff442018-05-02 12:54:52 -07001818 if (iocb->ki_flags & IOCB_DSYNC)
1819 dio->flags |= IOMAP_DIO_NEED_SYNC;
Dave Chinner3460cac2018-05-02 12:54:53 -07001820
1821 /*
1822 * For datasync only writes, we optimistically try using FUA for
1823 * this IO. Any non-FUA write that occurs will clear this flag,
1824 * hence we know before completion whether a cache flush is
1825 * necessary.
1826 */
1827 if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
1828 dio->flags |= IOMAP_DIO_WRITE_FUA;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001829 }
1830
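	/*
	 * IOCB_NOWAIT I/O must not block. If any page cache pages exist in
	 * the range we would have to wait to write back and invalidate them,
	 * so bail out with -EAGAIN instead.
	 */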
Goldwyn Rodriguesa38d1242017-06-20 07:05:45 -05001831 if (iocb->ki_flags & IOCB_NOWAIT) {
1832 if (filemap_range_has_page(mapping, start, end)) {
1833 ret = -EAGAIN;
1834 goto out_free_dio;
1835 }
1836 flags |= IOMAP_NOWAIT;
1837 }
1838
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001839 ret = filemap_write_and_wait_range(mapping, start, end);
1840 if (ret)
1841 goto out_free_dio;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001842
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001843 /*
1844 * Try to invalidate cache pages for the range we're direct
1845 * writing. If this invalidation fails, tough, the write will
1846 * still work, but racing two incompatible write paths is a
1847 * pretty crazy thing to do, so we don't support it 100%.
1848 */
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001849 ret = invalidate_inode_pages2_range(mapping,
1850 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001851 if (ret)
1852 dio_warn_stale_pagecache(iocb->ki_filp);
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001853 ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001854
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001855 if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
Chandan Rajendra546e7be2017-09-22 11:47:33 -07001856 !inode->i_sb->s_dio_done_wq) {
1857 ret = sb_init_dio_done_wq(inode->i_sb);
1858 if (ret < 0)
1859 goto out_free_dio;
1860 }
1861
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001862 inode_dio_begin(inode);
1863
1864 blk_start_plug(&plug);
1865 do {
1866 ret = iomap_apply(inode, pos, count, flags, ops, dio,
1867 iomap_dio_actor);
1868 if (ret <= 0) {
1869 /* magic error code to fall back to buffered I/O */
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001870 if (ret == -ENOTBLK) {
1871 dio->wait_for_completion = true;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001872 ret = 0;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001873 }
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001874 break;
1875 }
1876 pos += ret;
Chandan Rajendraa008c312017-04-12 11:03:20 -07001877
1878 if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
1879 break;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001880 } while ((count = iov_iter_count(iter)) > 0);
1881 blk_finish_plug(&plug);
1882
1883 if (ret < 0)
1884 iomap_dio_set_error(dio, ret);
1885
Dave Chinner3460cac2018-05-02 12:54:53 -07001886 /*
1887 * If all the writes we issued were FUA, we don't need to flush the
1888 * cache on IO completion. Clear the sync flag for this case.
1889 */
1890 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1891 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1892
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001893 if (!atomic_dec_and_test(&dio->ref)) {
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001894 if (!dio->wait_for_completion)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001895 return -EIOCBQUEUED;
1896
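		/*
		 * Synchronous case: sleep until iomap_dio_bio_end_io() clears
		 * submit.waiter and wakes us. For IOCB_HIPRI requests, poll
		 * the last submission queue instead of scheduling away.
		 */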
1897 for (;;) {
1898 set_current_state(TASK_UNINTERRUPTIBLE);
1899 if (!READ_ONCE(dio->submit.waiter))
1900 break;
1901
1902 if (!(iocb->ki_flags & IOCB_HIPRI) ||
1903 !dio->submit.last_queue ||
Christoph Hellwigea435e12017-11-02 21:29:54 +03001904 !blk_poll(dio->submit.last_queue,
Linus Torvalds5cc60ae2016-12-14 21:35:31 -08001905 dio->submit.cookie))
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001906 io_schedule();
1907 }
1908 __set_current_state(TASK_RUNNING);
1909 }
1910
Eryu Guanc771c142017-03-02 15:02:06 -08001911 ret = iomap_dio_complete(dio);
1912
Eryu Guanc771c142017-03-02 15:02:06 -08001913 return ret;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001914
1915out_free_dio:
1916 kfree(dio);
1917 return ret;
1918}
1919EXPORT_SYMBOL_GPL(iomap_dio_rw);
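/*
 * Example caller (a minimal, hypothetical sketch; "myfs_iomap_ops" stands in
 * for a filesystem-provided iomap_ops and is not defined in this file). The
 * caller must hold inode->i_rwsem, as asserted above:
 *
 *	static ssize_t myfs_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */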
Darrick J. Wong67482122018-05-10 08:38:15 -07001920
1921/* Swapfile activation */
1922
1923#ifdef CONFIG_SWAP
1924struct iomap_swapfile_info {
1925 struct iomap iomap; /* accumulated iomap */
1926 struct swap_info_struct *sis;
1927 uint64_t lowest_ppage; /* lowest physical addr seen (pages) */
1928 uint64_t highest_ppage; /* highest physical addr seen (pages) */
1929 unsigned long nr_pages; /* number of pages collected */
1930 int nr_extents; /* extent count */
1931};
1932
1933/*
1934 * Collect physical extents for this swap file. Physical extents reported to
1935 * the swap code must be trimmed to align to a page boundary. The logical
1936 * offset within the file is irrelevant since the swapfile code maps logical
1937 * page numbers of the swap device to the physical page-aligned extents.
1938 */
1939static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
1940{
1941 struct iomap *iomap = &isi->iomap;
1942 unsigned long nr_pages;
1943 uint64_t first_ppage;
1944 uint64_t first_ppage_reported;
1945 uint64_t next_ppage;
1946 int error;
1947
1948 /*
1949 * Round the start up and the end down so that the physical
1950 * extent aligns to a page boundary.
1951 */
1952 first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
1953 next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
1954 PAGE_SHIFT;
1955
1956 /* Skip too-short physical extents. */
1957 if (first_ppage >= next_ppage)
1958 return 0;
1959 nr_pages = next_ppage - first_ppage;
1960
1961 /*
1962 * Calculate how much swap space we're adding; the first page contains
1963 * the swap header and doesn't count. The mm still wants that first
1964 * page fed to add_swap_extent, however.
1965 */
1966 first_ppage_reported = first_ppage;
1967 if (iomap->offset == 0)
1968 first_ppage_reported++;
1969 if (isi->lowest_ppage > first_ppage_reported)
1970 isi->lowest_ppage = first_ppage_reported;
1971 if (isi->highest_ppage < (next_ppage - 1))
1972 isi->highest_ppage = next_ppage - 1;
1973
1974 /* Add extent, set up for the next call. */
1975 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
1976 if (error < 0)
1977 return error;
1978 isi->nr_extents += error;
1979 isi->nr_pages += nr_pages;
1980 return 0;
1981}
1982
1983/*
1984 * Accumulate iomaps for this swap file. We have to accumulate iomaps because
1985 * swap only cares about contiguous page-aligned physical extents and makes no
1986 * distinction between written and unwritten extents.
1987 */
1988static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
1989 loff_t count, void *data, struct iomap *iomap)
1990{
1991 struct iomap_swapfile_info *isi = data;
1992 int error;
1993
Christoph Hellwig19319b52018-06-01 09:03:06 -07001994 switch (iomap->type) {
1995 case IOMAP_MAPPED:
1996 case IOMAP_UNWRITTEN:
1997 /* Only real or unwritten extents. */
1998 break;
1999 case IOMAP_INLINE:
2000 /* No inline data. */
Omar Sandovalec601922018-05-16 11:13:34 -07002001 pr_err("swapon: file is inline\n");
2002 return -EINVAL;
Christoph Hellwig19319b52018-06-01 09:03:06 -07002003 default:
Omar Sandovalec601922018-05-16 11:13:34 -07002004 pr_err("swapon: file has unallocated extents\n");
2005 return -EINVAL;
2006 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002007
Omar Sandovalec601922018-05-16 11:13:34 -07002008 /* No uncommitted metadata or shared blocks. */
2009 if (iomap->flags & IOMAP_F_DIRTY) {
2010 pr_err("swapon: file is not committed\n");
2011 return -EINVAL;
2012 }
2013 if (iomap->flags & IOMAP_F_SHARED) {
2014 pr_err("swapon: file has shared extents\n");
2015 return -EINVAL;
2016 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002017
Omar Sandovalec601922018-05-16 11:13:34 -07002018 /* Only one bdev per swap file. */
2019 if (iomap->bdev != isi->sis->bdev) {
2020 pr_err("swapon: file is on multiple devices\n");
2021 return -EINVAL;
2022 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002023
2024 if (isi->iomap.length == 0) {
2025 /* No accumulated extent, so just store it. */
2026 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2027 } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
2028 /* Append this to the accumulated extent. */
2029 isi->iomap.length += iomap->length;
2030 } else {
2031 /* Otherwise, add the retained iomap and store this one. */
2032 error = iomap_swapfile_add_extent(isi);
2033 if (error)
2034 return error;
2035 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2036 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002037 return count;
Darrick J. Wong67482122018-05-10 08:38:15 -07002038}
2039
2040/*
2041 * Iterate a swap file's iomaps to construct physical extents that can be
2042 * passed to the swapfile subsystem.
2043 */
2044int iomap_swapfile_activate(struct swap_info_struct *sis,
2045 struct file *swap_file, sector_t *pagespan,
2046 const struct iomap_ops *ops)
2047{
2048 struct iomap_swapfile_info isi = {
2049 .sis = sis,
2050 .lowest_ppage = (sector_t)-1ULL,
2051 };
2052 struct address_space *mapping = swap_file->f_mapping;
2053 struct inode *inode = mapping->host;
2054 loff_t pos = 0;
2055 loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
2056 loff_t ret;
2057
Darrick J. Wong117a1482018-06-05 09:53:05 -07002058 /*
2059 * Persist all file mapping metadata so that we won't have any
2060 * IOMAP_F_DIRTY iomaps.
2061 */
2062 ret = vfs_fsync(swap_file, 1);
Darrick J. Wong67482122018-05-10 08:38:15 -07002063 if (ret)
2064 return ret;
2065
2066 while (len > 0) {
2067 ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
2068 ops, &isi, iomap_swapfile_activate_actor);
2069 if (ret <= 0)
2070 return ret;
2071
2072 pos += ret;
2073 len -= ret;
2074 }
2075
2076 if (isi.iomap.length) {
2077 ret = iomap_swapfile_add_extent(&isi);
2078 if (ret)
2079 return ret;
2080 }
2081
2082 *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
2083 sis->max = isi.nr_pages;
2084 sis->pages = isi.nr_pages - 1;
2085 sis->highest_bit = isi.nr_pages - 1;
2086 return isi.nr_extents;
2087}
2088EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
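/*
 * A filesystem would typically call this from its ->swap_activate()
 * address_space operation, e.g. (hypothetical sketch, "myfs_iomap_ops" is
 * illustrative):
 *
 *	static int myfs_swap_activate(struct swap_info_struct *sis,
 *			struct file *swap_file, sector_t *span)
 *	{
 *		return iomap_swapfile_activate(sis, swap_file, span,
 *				&myfs_iomap_ops);
 *	}
 */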
2089#endif /* CONFIG_SWAP */
Christoph Hellwig89eb1902018-06-01 09:03:08 -07002090
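/*
 * Translate one mapped extent back into a block number for the legacy bmap
 * interface. Non-mapped extents leave *bno at the caller's initial zero; if
 * the result would not fit in the historically int-sized bmap value, warn
 * and leave *bno untouched rather than returning a truncated address.
 */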
2091static loff_t
2092iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
2093 void *data, struct iomap *iomap)
2094{
2095 sector_t *bno = data, addr;
2096
2097 if (iomap->type == IOMAP_MAPPED) {
2098 addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
2099 if (addr > INT_MAX)
2100 WARN(1, "would truncate bmap result\n");
2101 else
2102 *bno = addr;
2103 }
2104 return 0;
2105}
2106
2107/* legacy ->bmap interface. 0 is the error return (!) */
2108sector_t
2109iomap_bmap(struct address_space *mapping, sector_t bno,
2110 const struct iomap_ops *ops)
2111{
2112 struct inode *inode = mapping->host;
Eric Sandeen79b3dbe2018-08-02 13:09:27 -07002113 loff_t pos = bno << inode->i_blkbits;
Christoph Hellwig89eb1902018-06-01 09:03:08 -07002114 unsigned blocksize = i_blocksize(inode);
2115
2116 if (filemap_write_and_wait(mapping))
2117 return 0;
2118
2119 bno = 0;
2120 iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
2121 return bno;
2122}
2123EXPORT_SYMBOL_GPL(iomap_bmap);
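/*
 * A filesystem would wire this up from its ->bmap() address_space operation,
 * e.g. (hypothetical sketch, "myfs_iomap_ops" is illustrative):
 *
 *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return iomap_bmap(mapping, block, &myfs_iomap_ops);
 *	}
 */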