blob: f61d13dfdf09583403e8402962ce373d68c03d19 [file] [log] [blame]
Christoph Hellwigae259a92016-06-21 09:23:11 +10001/*
2 * Copyright (C) 2010 Red Hat, Inc.
Christoph Hellwig72b4daa2018-06-19 15:10:57 -07003 * Copyright (c) 2016-2018 Christoph Hellwig.
Christoph Hellwigae259a92016-06-21 09:23:11 +10004 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/module.h>
15#include <linux/compiler.h>
16#include <linux/fs.h>
17#include <linux/iomap.h>
18#include <linux/uaccess.h>
19#include <linux/gfp.h>
Christoph Hellwig9dc55f12018-07-11 22:26:05 -070020#include <linux/migrate.h>
Christoph Hellwigae259a92016-06-21 09:23:11 +100021#include <linux/mm.h>
Christoph Hellwig72b4daa2018-06-19 15:10:57 -070022#include <linux/mm_inline.h>
Christoph Hellwigae259a92016-06-21 09:23:11 +100023#include <linux/swap.h>
24#include <linux/pagemap.h>
Christoph Hellwig8a78cb12018-06-01 09:04:40 -070025#include <linux/pagevec.h>
Christoph Hellwigae259a92016-06-21 09:23:11 +100026#include <linux/file.h>
27#include <linux/uio.h>
28#include <linux/backing-dev.h>
29#include <linux/buffer_head.h>
Christoph Hellwigff6a9292016-11-30 14:36:01 +110030#include <linux/task_io_accounting_ops.h>
Christoph Hellwig9a286f02016-06-21 09:31:39 +100031#include <linux/dax.h>
Ingo Molnarf361bf42017-02-03 23:47:37 +010032#include <linux/sched/signal.h>
33
Christoph Hellwigae259a92016-06-21 09:23:11 +100034#include "internal.h"
35
Christoph Hellwigae259a92016-06-21 09:23:11 +100036/*
37 * Execute a iomap write on a segment of the mapping that spans a
38 * contiguous range of pages that have identical block mapping state.
39 *
40 * This avoids the need to map pages individually, do individual allocations
41 * for each page and most importantly avoid the need for filesystem specific
42 * locking per page. Instead, all the operations are amortised over the entire
43 * range of pages. It is assumed that the filesystems will lock whatever
44 * resources they require in the iomap_begin call, and release them in the
45 * iomap_end call.
46 */
Christoph Hellwigbefb5032016-09-19 11:24:49 +100047loff_t
Christoph Hellwigae259a92016-06-21 09:23:11 +100048iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -080049 const struct iomap_ops *ops, void *data, iomap_actor_t actor)
Christoph Hellwigae259a92016-06-21 09:23:11 +100050{
51 struct iomap iomap = { 0 };
52 loff_t written = 0, ret;
53
54 /*
55 * Need to map a range from start position for length bytes. This can
56 * span multiple pages - it is only guaranteed to return a range of a
57 * single type of pages (e.g. all into a hole, all mapped or all
58 * unwritten). Failure at this point has nothing to undo.
59 *
60 * If allocation is required for this range, reserve the space now so
61 * that the allocation is guaranteed to succeed later on. Once we copy
62 * the data into the page cache pages, then we cannot fail otherwise we
63 * expose transient stale data. If the reserve fails, we can safely
64 * back out at this point as there is nothing to undo.
65 */
66 ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
67 if (ret)
68 return ret;
69 if (WARN_ON(iomap.offset > pos))
70 return -EIO;
Darrick J. Wong0c6dda72018-01-26 11:11:20 -080071 if (WARN_ON(iomap.length == 0))
72 return -EIO;
Christoph Hellwigae259a92016-06-21 09:23:11 +100073
74 /*
75 * Cut down the length to the one actually provided by the filesystem,
76 * as it might not be able to give us the whole size that we requested.
77 */
78 if (iomap.offset + iomap.length < pos + length)
79 length = iomap.offset + iomap.length - pos;
80
81 /*
82 * Now that we have guaranteed that the space allocation will succeed.
83 * we can do the copy-in page by page without having to worry about
84 * failures exposing transient data.
85 */
86 written = actor(inode, pos, length, data, &iomap);
87
88 /*
89 * Now the data has been copied, commit the range we've copied. This
90 * should not fail unless the filesystem has had a fatal error.
91 */
Christoph Hellwigf20ac7a2016-08-17 08:42:34 +100092 if (ops->iomap_end) {
93 ret = ops->iomap_end(inode, pos, length,
94 written > 0 ? written : 0,
95 flags, &iomap);
96 }
Christoph Hellwigae259a92016-06-21 09:23:11 +100097
98 return written ? written : ret;
99}
100
Christoph Hellwig57fc5052018-06-01 09:03:08 -0700101static sector_t
102iomap_sector(struct iomap *iomap, loff_t pos)
103{
104 return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
105}
106
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700107static struct iomap_page *
108iomap_page_create(struct inode *inode, struct page *page)
109{
110 struct iomap_page *iop = to_iomap_page(page);
111
112 if (iop || i_blocksize(inode) == PAGE_SIZE)
113 return iop;
114
115 iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
116 atomic_set(&iop->read_count, 0);
117 atomic_set(&iop->write_count, 0);
118 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
119 set_page_private(page, (unsigned long)iop);
120 SetPagePrivate(page);
121 return iop;
122}
123
124static void
125iomap_page_release(struct page *page)
126{
127 struct iomap_page *iop = to_iomap_page(page);
128
129 if (!iop)
130 return;
131 WARN_ON_ONCE(atomic_read(&iop->read_count));
132 WARN_ON_ONCE(atomic_read(&iop->write_count));
133 ClearPagePrivate(page);
134 set_page_private(page, 0);
135 kfree(iop);
136}
137
138/*
139 * Calculate the range inside the page that we actually need to read.
140 */
141static void
142iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
143 loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
144{
145 unsigned block_bits = inode->i_blkbits;
146 unsigned block_size = (1 << block_bits);
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700147 unsigned poff = offset_in_page(*pos);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700148 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
149 unsigned first = poff >> block_bits;
150 unsigned last = (poff + plen - 1) >> block_bits;
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700151 unsigned end = offset_in_page(i_size_read(inode)) >> block_bits;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700152
153 /*
154 * If the block size is smaller than the page size we need to check the
155 * per-block uptodate status and adjust the offset and length if needed
156 * to avoid reading in already uptodate ranges.
157 */
158 if (iop) {
159 unsigned int i;
160
161 /* move forward for each leading block marked uptodate */
162 for (i = first; i <= last; i++) {
163 if (!test_bit(i, iop->uptodate))
164 break;
165 *pos += block_size;
166 poff += block_size;
167 plen -= block_size;
168 first++;
169 }
170
171 /* truncate len if we find any trailing uptodate block(s) */
172 for ( ; i <= last; i++) {
173 if (test_bit(i, iop->uptodate)) {
174 plen -= (last - i + 1) * block_size;
175 last = i - 1;
176 break;
177 }
178 }
179 }
180
181 /*
182 * If the extent spans the block that contains the i_size we need to
183 * handle both halves separately so that we properly zero data in the
184 * page cache for blocks that are entirely outside of i_size.
185 */
186 if (first <= end && last > end)
187 plen -= (last - end) * block_size;
188
189 *offp = poff;
190 *lenp = plen;
191}
192
193static void
194iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
195{
196 struct iomap_page *iop = to_iomap_page(page);
197 struct inode *inode = page->mapping->host;
198 unsigned first = off >> inode->i_blkbits;
199 unsigned last = (off + len - 1) >> inode->i_blkbits;
200 unsigned int i;
201 bool uptodate = true;
202
203 if (iop) {
204 for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
205 if (i >= first && i <= last)
206 set_bit(i, iop->uptodate);
207 else if (!test_bit(i, iop->uptodate))
208 uptodate = false;
209 }
210 }
211
212 if (uptodate && !PageError(page))
213 SetPageUptodate(page);
214}
215
216static void
217iomap_read_finish(struct iomap_page *iop, struct page *page)
218{
219 if (!iop || atomic_dec_and_test(&iop->read_count))
220 unlock_page(page);
221}
222
223static void
224iomap_read_page_end_io(struct bio_vec *bvec, int error)
225{
226 struct page *page = bvec->bv_page;
227 struct iomap_page *iop = to_iomap_page(page);
228
229 if (unlikely(error)) {
230 ClearPageUptodate(page);
231 SetPageError(page);
232 } else {
233 iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
234 }
235
236 iomap_read_finish(iop, page);
237}
238
Christoph Hellwigae259a92016-06-21 09:23:11 +1000239static void
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700240iomap_read_inline_data(struct inode *inode, struct page *page,
241 struct iomap *iomap)
242{
243 size_t size = i_size_read(inode);
244 void *addr;
245
246 if (PageUptodate(page))
247 return;
248
249 BUG_ON(page->index);
250 BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
251
252 addr = kmap_atomic(page);
253 memcpy(addr, iomap->inline_data, size);
254 memset(addr + size, 0, PAGE_SIZE - size);
255 kunmap_atomic(addr);
256 SetPageUptodate(page);
257}
258
259static void
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700260iomap_read_end_io(struct bio *bio)
261{
262 int error = blk_status_to_errno(bio->bi_status);
263 struct bio_vec *bvec;
264 int i;
265
266 bio_for_each_segment_all(bvec, bio, i)
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700267 iomap_read_page_end_io(bvec, error);
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700268 bio_put(bio);
269}
270
271struct iomap_readpage_ctx {
272 struct page *cur_page;
273 bool cur_page_in_bio;
274 bool is_readahead;
275 struct bio *bio;
276 struct list_head *pages;
277};
278
279static loff_t
280iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
281 struct iomap *iomap)
282{
283 struct iomap_readpage_ctx *ctx = data;
284 struct page *page = ctx->cur_page;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700285 struct iomap_page *iop = iomap_page_create(inode, page);
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700286 bool is_contig = false;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700287 loff_t orig_pos = pos;
288 unsigned poff, plen;
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700289 sector_t sector;
290
Andreas Gruenbacher806a1472018-07-03 09:07:47 -0700291 if (iomap->type == IOMAP_INLINE) {
Darrick J. Wong7d5e0492018-08-10 17:55:57 -0700292 WARN_ON_ONCE(pos);
Andreas Gruenbacher806a1472018-07-03 09:07:47 -0700293 iomap_read_inline_data(inode, page, iomap);
294 return PAGE_SIZE;
295 }
296
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700297 /* zero post-eof blocks as the page may be mapped */
298 iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
299 if (plen == 0)
300 goto done;
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700301
302 if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
303 zero_user(page, poff, plen);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700304 iomap_set_range_uptodate(page, poff, plen);
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700305 goto done;
306 }
307
308 ctx->cur_page_in_bio = true;
309
310 /*
311 * Try to merge into a previous segment if we can.
312 */
313 sector = iomap_sector(iomap, pos);
314 if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
315 if (__bio_try_merge_page(ctx->bio, page, plen, poff))
316 goto done;
317 is_contig = true;
318 }
319
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700320 /*
321 * If we start a new segment we need to increase the read count, and we
322 * need to do so before submitting any previous full bio to make sure
323 * that we don't prematurely unlock the page.
324 */
325 if (iop)
326 atomic_inc(&iop->read_count);
327
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700328 if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
329 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
330 int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
331
332 if (ctx->bio)
333 submit_bio(ctx->bio);
334
335 if (ctx->is_readahead) /* same as readahead_gfp_mask */
336 gfp |= __GFP_NORETRY | __GFP_NOWARN;
337 ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
338 ctx->bio->bi_opf = REQ_OP_READ;
339 if (ctx->is_readahead)
340 ctx->bio->bi_opf |= REQ_RAHEAD;
341 ctx->bio->bi_iter.bi_sector = sector;
342 bio_set_dev(ctx->bio, iomap->bdev);
343 ctx->bio->bi_end_io = iomap_read_end_io;
344 }
345
346 __bio_add_page(ctx->bio, page, plen, poff);
347done:
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700348 /*
349 * Move the caller beyond our range so that it keeps making progress.
350 * For that we have to include any leading non-uptodate ranges, but
351 * we can skip trailing ones as they will be handled in the next
352 * iteration.
353 */
354 return pos - orig_pos + plen;
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700355}
356
357int
358iomap_readpage(struct page *page, const struct iomap_ops *ops)
359{
360 struct iomap_readpage_ctx ctx = { .cur_page = page };
361 struct inode *inode = page->mapping->host;
362 unsigned poff;
363 loff_t ret;
364
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700365 for (poff = 0; poff < PAGE_SIZE; poff += ret) {
366 ret = iomap_apply(inode, page_offset(page) + poff,
367 PAGE_SIZE - poff, 0, ops, &ctx,
368 iomap_readpage_actor);
369 if (ret <= 0) {
370 WARN_ON_ONCE(ret == 0);
371 SetPageError(page);
372 break;
373 }
374 }
375
376 if (ctx.bio) {
377 submit_bio(ctx.bio);
378 WARN_ON_ONCE(!ctx.cur_page_in_bio);
379 } else {
380 WARN_ON_ONCE(ctx.cur_page_in_bio);
381 unlock_page(page);
382 }
383
384 /*
385 * Just like mpage_readpages and block_read_full_page we always
386 * return 0 and just mark the page as PageError on errors. This
387 * should be cleaned up all through the stack eventually.
388 */
389 return 0;
390}
391EXPORT_SYMBOL_GPL(iomap_readpage);
392
393static struct page *
394iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
395 loff_t length, loff_t *done)
396{
397 while (!list_empty(pages)) {
398 struct page *page = lru_to_page(pages);
399
400 if (page_offset(page) >= (u64)pos + length)
401 break;
402
403 list_del(&page->lru);
404 if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
405 GFP_NOFS))
406 return page;
407
408 /*
409 * If we already have a page in the page cache at index we are
410 * done. Upper layers don't care if it is uptodate after the
411 * readpages call itself as every page gets checked again once
412 * actually needed.
413 */
414 *done += PAGE_SIZE;
415 put_page(page);
416 }
417
418 return NULL;
419}
420
421static loff_t
422iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
423 void *data, struct iomap *iomap)
424{
425 struct iomap_readpage_ctx *ctx = data;
426 loff_t done, ret;
427
428 for (done = 0; done < length; done += ret) {
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700429 if (ctx->cur_page && offset_in_page(pos + done) == 0) {
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700430 if (!ctx->cur_page_in_bio)
431 unlock_page(ctx->cur_page);
432 put_page(ctx->cur_page);
433 ctx->cur_page = NULL;
434 }
435 if (!ctx->cur_page) {
436 ctx->cur_page = iomap_next_page(inode, ctx->pages,
437 pos, length, &done);
438 if (!ctx->cur_page)
439 break;
440 ctx->cur_page_in_bio = false;
441 }
442 ret = iomap_readpage_actor(inode, pos + done, length - done,
443 ctx, iomap);
444 }
445
446 return done;
447}
448
449int
450iomap_readpages(struct address_space *mapping, struct list_head *pages,
451 unsigned nr_pages, const struct iomap_ops *ops)
452{
453 struct iomap_readpage_ctx ctx = {
454 .pages = pages,
455 .is_readahead = true,
456 };
457 loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
458 loff_t last = page_offset(list_entry(pages->next, struct page, lru));
459 loff_t length = last - pos + PAGE_SIZE, ret = 0;
460
461 while (length > 0) {
462 ret = iomap_apply(mapping->host, pos, length, 0, ops,
463 &ctx, iomap_readpages_actor);
464 if (ret <= 0) {
465 WARN_ON_ONCE(ret == 0);
466 goto done;
467 }
468 pos += ret;
469 length -= ret;
470 }
471 ret = 0;
472done:
473 if (ctx.bio)
474 submit_bio(ctx.bio);
475 if (ctx.cur_page) {
476 if (!ctx.cur_page_in_bio)
477 unlock_page(ctx.cur_page);
478 put_page(ctx.cur_page);
479 }
480
481 /*
482 * Check that we didn't lose a page due to the arcance calling
483 * conventions..
484 */
485 WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
486 return ret;
487}
488EXPORT_SYMBOL_GPL(iomap_readpages);
489
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700490int
491iomap_is_partially_uptodate(struct page *page, unsigned long from,
492 unsigned long count)
493{
494 struct iomap_page *iop = to_iomap_page(page);
495 struct inode *inode = page->mapping->host;
496 unsigned first = from >> inode->i_blkbits;
497 unsigned last = (from + count - 1) >> inode->i_blkbits;
498 unsigned i;
499
500 if (iop) {
501 for (i = first; i <= last; i++)
502 if (!test_bit(i, iop->uptodate))
503 return 0;
504 return 1;
505 }
506
507 return 0;
508}
509EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
510
511int
512iomap_releasepage(struct page *page, gfp_t gfp_mask)
513{
514 /*
515 * mm accommodates an old ext3 case where clean pages might not have had
516 * the dirty bit cleared. Thus, it can send actual dirty pages to
517 * ->releasepage() via shrink_active_list(), skip those here.
518 */
519 if (PageDirty(page) || PageWriteback(page))
520 return 0;
521 iomap_page_release(page);
522 return 1;
523}
524EXPORT_SYMBOL_GPL(iomap_releasepage);
525
526void
527iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
528{
529 /*
530 * If we are invalidating the entire page, clear the dirty state from it
531 * and release it to avoid unnecessary buildup of the LRU.
532 */
533 if (offset == 0 && len == PAGE_SIZE) {
534 WARN_ON_ONCE(PageWriteback(page));
535 cancel_dirty_page(page);
536 iomap_page_release(page);
537 }
538}
539EXPORT_SYMBOL_GPL(iomap_invalidatepage);
540
541#ifdef CONFIG_MIGRATION
542int
543iomap_migrate_page(struct address_space *mapping, struct page *newpage,
544 struct page *page, enum migrate_mode mode)
545{
546 int ret;
547
548 ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
549 if (ret != MIGRATEPAGE_SUCCESS)
550 return ret;
551
552 if (page_has_private(page)) {
553 ClearPagePrivate(page);
554 set_page_private(newpage, page_private(page));
555 set_page_private(page, 0);
556 SetPagePrivate(newpage);
557 }
558
559 if (mode != MIGRATE_SYNC_NO_COPY)
560 migrate_page_copy(newpage, page);
561 else
562 migrate_page_states(newpage, page);
563 return MIGRATEPAGE_SUCCESS;
564}
565EXPORT_SYMBOL_GPL(iomap_migrate_page);
566#endif /* CONFIG_MIGRATION */
567
Christoph Hellwig72b4daa2018-06-19 15:10:57 -0700568static void
Christoph Hellwigae259a92016-06-21 09:23:11 +1000569iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
570{
571 loff_t i_size = i_size_read(inode);
572
573 /*
574 * Only truncate newly allocated pages beyoned EOF, even if the
575 * write started inside the existing inode size.
576 */
577 if (pos + len > i_size)
578 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
579}
580
581static int
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700582iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
583 unsigned poff, unsigned plen, unsigned from, unsigned to,
584 struct iomap *iomap)
585{
586 struct bio_vec bvec;
587 struct bio bio;
588
589 if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
590 zero_user_segments(page, poff, from, to, poff + plen);
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700591 iomap_set_range_uptodate(page, poff, plen);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700592 return 0;
593 }
594
595 bio_init(&bio, &bvec, 1);
596 bio.bi_opf = REQ_OP_READ;
597 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
598 bio_set_dev(&bio, iomap->bdev);
599 __bio_add_page(&bio, page, plen, poff);
600 return submit_bio_wait(&bio);
601}
602
603static int
604__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
605 struct page *page, struct iomap *iomap)
606{
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700607 struct iomap_page *iop = iomap_page_create(inode, page);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700608 loff_t block_size = i_blocksize(inode);
609 loff_t block_start = pos & ~(block_size - 1);
610 loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700611 unsigned from = offset_in_page(pos), to = from + len, poff, plen;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700612 int status = 0;
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700613
614 if (PageUptodate(page))
615 return 0;
Christoph Hellwig9dc55f12018-07-11 22:26:05 -0700616
617 do {
618 iomap_adjust_read_range(inode, iop, &block_start,
619 block_end - block_start, &poff, &plen);
620 if (plen == 0)
621 break;
622
623 if ((from > poff && from < poff + plen) ||
624 (to > poff && to < poff + plen)) {
625 status = iomap_read_page_sync(inode, block_start, page,
626 poff, plen, from, to, iomap);
627 if (status)
628 break;
629 }
630
631 } while ((block_start += plen) < block_end);
632
633 return status;
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700634}
635
636static int
Christoph Hellwigae259a92016-06-21 09:23:11 +1000637iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
638 struct page **pagep, struct iomap *iomap)
639{
640 pgoff_t index = pos >> PAGE_SHIFT;
641 struct page *page;
642 int status = 0;
643
644 BUG_ON(pos + len > iomap->offset + iomap->length);
645
Michal Hockod1908f52017-02-03 13:13:26 -0800646 if (fatal_signal_pending(current))
647 return -EINTR;
648
Christoph Hellwigae259a92016-06-21 09:23:11 +1000649 page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
650 if (!page)
651 return -ENOMEM;
652
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700653 if (iomap->type == IOMAP_INLINE)
654 iomap_read_inline_data(inode, page, iomap);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700655 else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700656 status = __block_write_begin_int(page, pos, len, NULL, iomap);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700657 else
658 status = __iomap_write_begin(inode, pos, len, page, iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000659 if (unlikely(status)) {
660 unlock_page(page);
661 put_page(page);
662 page = NULL;
663
664 iomap_write_failed(inode, pos, len);
665 }
666
667 *pagep = page;
668 return status;
669}
670
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700671int
672iomap_set_page_dirty(struct page *page)
673{
674 struct address_space *mapping = page_mapping(page);
675 int newly_dirty;
676
677 if (unlikely(!mapping))
678 return !TestSetPageDirty(page);
679
680 /*
681 * Lock out page->mem_cgroup migration to keep PageDirty
682 * synchronized with per-memcg dirty page counters.
683 */
684 lock_page_memcg(page);
685 newly_dirty = !TestSetPageDirty(page);
686 if (newly_dirty)
687 __set_page_dirty(page, mapping, 0);
688 unlock_page_memcg(page);
689
690 if (newly_dirty)
691 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
692 return newly_dirty;
693}
694EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
695
696static int
697__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
698 unsigned copied, struct page *page, struct iomap *iomap)
699{
700 flush_dcache_page(page);
701
702 /*
703 * The blocks that were entirely written will now be uptodate, so we
704 * don't have to worry about a readpage reading them and overwriting a
705 * partial write. However if we have encountered a short write and only
706 * partially written into a block, it will not be marked uptodate, so a
707 * readpage might come in and destroy our partial write.
708 *
709 * Do the simplest thing, and just treat any short write to a non
710 * uptodate page as a zero-length write, and force the caller to redo
711 * the whole thing.
712 */
713 if (unlikely(copied < len && !PageUptodate(page))) {
714 copied = 0;
715 } else {
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700716 iomap_set_range_uptodate(page, offset_in_page(pos), len);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700717 iomap_set_page_dirty(page);
718 }
719 return __generic_write_end(inode, pos, copied, page);
720}
721
Christoph Hellwigae259a92016-06-21 09:23:11 +1000722static int
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700723iomap_write_end_inline(struct inode *inode, struct page *page,
724 struct iomap *iomap, loff_t pos, unsigned copied)
725{
726 void *addr;
727
728 WARN_ON_ONCE(!PageUptodate(page));
729 BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
730
731 addr = kmap_atomic(page);
732 memcpy(iomap->inline_data + pos, addr + pos, copied);
733 kunmap_atomic(addr);
734
735 mark_inode_dirty(inode);
736 __generic_write_end(inode, pos, copied, page);
737 return copied;
738}
739
Christoph Hellwigae259a92016-06-21 09:23:11 +1000740static int
741iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700742 unsigned copied, struct page *page, struct iomap *iomap)
Christoph Hellwigae259a92016-06-21 09:23:11 +1000743{
744 int ret;
745
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700746 if (iomap->type == IOMAP_INLINE) {
747 ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700748 } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700749 ret = generic_write_end(NULL, inode->i_mapping, pos, len,
750 copied, page, NULL);
Christoph Hellwigc03cea42018-06-19 15:10:58 -0700751 } else {
752 ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700753 }
754
Christoph Hellwig63899c62018-06-19 15:10:56 -0700755 if (iomap->page_done)
756 iomap->page_done(inode, pos, copied, page, iomap);
757
Christoph Hellwigae259a92016-06-21 09:23:11 +1000758 if (ret < len)
759 iomap_write_failed(inode, pos, len);
760 return ret;
761}
762
763static loff_t
764iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
765 struct iomap *iomap)
766{
767 struct iov_iter *i = data;
768 long status = 0;
769 ssize_t written = 0;
770 unsigned int flags = AOP_FLAG_NOFS;
771
Christoph Hellwigae259a92016-06-21 09:23:11 +1000772 do {
773 struct page *page;
774 unsigned long offset; /* Offset into pagecache page */
775 unsigned long bytes; /* Bytes to write to page */
776 size_t copied; /* Bytes copied from user */
777
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700778 offset = offset_in_page(pos);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000779 bytes = min_t(unsigned long, PAGE_SIZE - offset,
780 iov_iter_count(i));
781again:
782 if (bytes > length)
783 bytes = length;
784
785 /*
786 * Bring in the user page that we will copy from _first_.
787 * Otherwise there's a nasty deadlock on copying from the
788 * same page as we're writing to, without it being marked
789 * up-to-date.
790 *
791 * Not only is this an optimisation, but it is also required
792 * to check that the address is actually valid, when atomic
793 * usercopies are used, below.
794 */
795 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
796 status = -EFAULT;
797 break;
798 }
799
800 status = iomap_write_begin(inode, pos, bytes, flags, &page,
801 iomap);
802 if (unlikely(status))
803 break;
804
805 if (mapping_writably_mapped(inode->i_mapping))
806 flush_dcache_page(page);
807
Christoph Hellwigae259a92016-06-21 09:23:11 +1000808 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000809
810 flush_dcache_page(page);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000811
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700812 status = iomap_write_end(inode, pos, bytes, copied, page,
813 iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000814 if (unlikely(status < 0))
815 break;
816 copied = status;
817
818 cond_resched();
819
820 iov_iter_advance(i, copied);
821 if (unlikely(copied == 0)) {
822 /*
823 * If we were unable to copy any data at all, we must
824 * fall back to a single segment length write.
825 *
826 * If we didn't fallback here, we could livelock
827 * because not all segments in the iov can be copied at
828 * once without a pagefault.
829 */
830 bytes = min_t(unsigned long, PAGE_SIZE - offset,
831 iov_iter_single_seg_count(i));
832 goto again;
833 }
834 pos += copied;
835 written += copied;
836 length -= copied;
837
838 balance_dirty_pages_ratelimited(inode->i_mapping);
839 } while (iov_iter_count(i) && length);
840
841 return written ? written : status;
842}
843
844ssize_t
845iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -0800846 const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +1000847{
848 struct inode *inode = iocb->ki_filp->f_mapping->host;
849 loff_t pos = iocb->ki_pos, ret = 0, written = 0;
850
851 while (iov_iter_count(iter)) {
852 ret = iomap_apply(inode, pos, iov_iter_count(iter),
853 IOMAP_WRITE, ops, iter, iomap_write_actor);
854 if (ret <= 0)
855 break;
856 pos += ret;
857 written += ret;
858 }
859
860 return written ? written : ret;
861}
862EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
863
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000864static struct page *
865__iomap_read_page(struct inode *inode, loff_t offset)
866{
867 struct address_space *mapping = inode->i_mapping;
868 struct page *page;
869
870 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
871 if (IS_ERR(page))
872 return page;
873 if (!PageUptodate(page)) {
874 put_page(page);
875 return ERR_PTR(-EIO);
876 }
877 return page;
878}
879
880static loff_t
881iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
882 struct iomap *iomap)
883{
884 long status = 0;
885 ssize_t written = 0;
886
887 do {
888 struct page *page, *rpage;
889 unsigned long offset; /* Offset into pagecache page */
890 unsigned long bytes; /* Bytes to write to page */
891
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700892 offset = offset_in_page(pos);
Christoph Hellwige28ae8e2017-08-11 12:45:35 -0700893 bytes = min_t(loff_t, PAGE_SIZE - offset, length);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000894
895 rpage = __iomap_read_page(inode, pos);
896 if (IS_ERR(rpage))
897 return PTR_ERR(rpage);
898
899 status = iomap_write_begin(inode, pos, bytes,
Tetsuo Handac718a972017-05-08 15:58:59 -0700900 AOP_FLAG_NOFS, &page, iomap);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000901 put_page(rpage);
902 if (unlikely(status))
903 return status;
904
905 WARN_ON_ONCE(!PageUptodate(page));
906
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700907 status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000908 if (unlikely(status <= 0)) {
909 if (WARN_ON_ONCE(status == 0))
910 return -EIO;
911 return status;
912 }
913
914 cond_resched();
915
916 pos += status;
917 written += status;
918 length -= status;
919
920 balance_dirty_pages_ratelimited(inode->i_mapping);
921 } while (length);
922
923 return written;
924}
925
926int
927iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -0800928 const struct iomap_ops *ops)
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000929{
930 loff_t ret;
931
932 while (len) {
933 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
934 iomap_dirty_actor);
935 if (ret <= 0)
936 return ret;
937 pos += ret;
938 len -= ret;
939 }
940
941 return 0;
942}
943EXPORT_SYMBOL_GPL(iomap_file_dirty);
944
Christoph Hellwigae259a92016-06-21 09:23:11 +1000945static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
946 unsigned bytes, struct iomap *iomap)
947{
948 struct page *page;
949 int status;
950
Tetsuo Handac718a972017-05-08 15:58:59 -0700951 status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
952 iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000953 if (status)
954 return status;
955
956 zero_user(page, offset, bytes);
957 mark_page_accessed(page);
958
Andreas Gruenbacher19e0c582018-06-19 15:10:56 -0700959 return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000960}
961
Christoph Hellwig9a286f02016-06-21 09:31:39 +1000962static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
963 struct iomap *iomap)
964{
Christoph Hellwig57fc5052018-06-01 09:03:08 -0700965 return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
966 iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
Christoph Hellwig9a286f02016-06-21 09:31:39 +1000967}
968
Christoph Hellwigae259a92016-06-21 09:23:11 +1000969static loff_t
970iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
971 void *data, struct iomap *iomap)
972{
973 bool *did_zero = data;
974 loff_t written = 0;
975 int status;
976
977 /* already zeroed? we're done. */
978 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
979 return count;
980
981 do {
982 unsigned offset, bytes;
983
Andreas Gruenbacher10259de2018-08-10 11:46:14 -0700984 offset = offset_in_page(pos);
Christoph Hellwige28ae8e2017-08-11 12:45:35 -0700985 bytes = min_t(loff_t, PAGE_SIZE - offset, count);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000986
Christoph Hellwig9a286f02016-06-21 09:31:39 +1000987 if (IS_DAX(inode))
988 status = iomap_dax_zero(pos, offset, bytes, iomap);
989 else
990 status = iomap_zero(inode, pos, offset, bytes, iomap);
Christoph Hellwigae259a92016-06-21 09:23:11 +1000991 if (status < 0)
992 return status;
993
994 pos += bytes;
995 count -= bytes;
996 written += bytes;
997 if (did_zero)
998 *did_zero = true;
999 } while (count > 0);
1000
1001 return written;
1002}
1003
1004int
1005iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001006 const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +10001007{
1008 loff_t ret;
1009
1010 while (len > 0) {
1011 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
1012 ops, did_zero, iomap_zero_range_actor);
1013 if (ret <= 0)
1014 return ret;
1015
1016 pos += ret;
1017 len -= ret;
1018 }
1019
1020 return 0;
1021}
1022EXPORT_SYMBOL_GPL(iomap_zero_range);
1023
1024int
1025iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001026 const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +10001027{
Fabian Frederick93407472017-02-27 14:28:32 -08001028 unsigned int blocksize = i_blocksize(inode);
1029 unsigned int off = pos & (blocksize - 1);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001030
1031 /* Block boundary? Nothing to do */
1032 if (!off)
1033 return 0;
1034 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1035}
1036EXPORT_SYMBOL_GPL(iomap_truncate_page);
1037
1038static loff_t
1039iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
1040 void *data, struct iomap *iomap)
1041{
1042 struct page *page = data;
1043 int ret;
1044
Christoph Hellwigc03cea42018-06-19 15:10:58 -07001045 if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
1046 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
1047 if (ret)
1048 return ret;
1049 block_commit_write(page, 0, length);
1050 } else {
1051 WARN_ON_ONCE(!PageUptodate(page));
Christoph Hellwig9dc55f12018-07-11 22:26:05 -07001052 iomap_page_create(inode, page);
Brian Foster561295a2018-09-29 13:51:01 +10001053 set_page_dirty(page);
Christoph Hellwigc03cea42018-06-19 15:10:58 -07001054 }
Christoph Hellwigae259a92016-06-21 09:23:11 +10001055
Christoph Hellwigae259a92016-06-21 09:23:11 +10001056 return length;
1057}
1058
Souptick Joarder5780a022018-10-26 15:02:59 -07001059vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
Christoph Hellwigae259a92016-06-21 09:23:11 +10001060{
1061 struct page *page = vmf->page;
Dave Jiang11bac802017-02-24 14:56:41 -08001062 struct inode *inode = file_inode(vmf->vma->vm_file);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001063 unsigned long length;
1064 loff_t offset, size;
1065 ssize_t ret;
1066
1067 lock_page(page);
1068 size = i_size_read(inode);
1069 if ((page->mapping != inode->i_mapping) ||
1070 (page_offset(page) > size)) {
1071 /* We overload EFAULT to mean page got truncated */
1072 ret = -EFAULT;
1073 goto out_unlock;
1074 }
1075
1076 /* page is wholly or partially inside EOF */
1077 if (((page->index + 1) << PAGE_SHIFT) > size)
Andreas Gruenbacher10259de2018-08-10 11:46:14 -07001078 length = offset_in_page(size);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001079 else
1080 length = PAGE_SIZE;
1081
1082 offset = page_offset(page);
1083 while (length > 0) {
Jan Kara9484ab12016-11-10 10:26:50 +11001084 ret = iomap_apply(inode, offset, length,
1085 IOMAP_WRITE | IOMAP_FAULT, ops, page,
1086 iomap_page_mkwrite_actor);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001087 if (unlikely(ret <= 0))
1088 goto out_unlock;
1089 offset += ret;
1090 length -= ret;
1091 }
1092
Christoph Hellwigae259a92016-06-21 09:23:11 +10001093 wait_for_stable_page(page);
Christoph Hellwige7647fb2017-08-29 10:08:41 -07001094 return VM_FAULT_LOCKED;
Christoph Hellwigae259a92016-06-21 09:23:11 +10001095out_unlock:
1096 unlock_page(page);
Christoph Hellwige7647fb2017-08-29 10:08:41 -07001097 return block_page_mkwrite_return(ret);
Christoph Hellwigae259a92016-06-21 09:23:11 +10001098}
1099EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001100
1101struct fiemap_ctx {
1102 struct fiemap_extent_info *fi;
1103 struct iomap prev;
1104};
1105
1106static int iomap_to_fiemap(struct fiemap_extent_info *fi,
1107 struct iomap *iomap, u32 flags)
1108{
1109 switch (iomap->type) {
1110 case IOMAP_HOLE:
1111 /* skip holes */
1112 return 0;
1113 case IOMAP_DELALLOC:
1114 flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
1115 break;
Christoph Hellwig19319b52018-06-01 09:03:06 -07001116 case IOMAP_MAPPED:
1117 break;
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001118 case IOMAP_UNWRITTEN:
1119 flags |= FIEMAP_EXTENT_UNWRITTEN;
1120 break;
Christoph Hellwig19319b52018-06-01 09:03:06 -07001121 case IOMAP_INLINE:
1122 flags |= FIEMAP_EXTENT_DATA_INLINE;
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001123 break;
1124 }
1125
Christoph Hellwig17de0a92016-08-29 11:33:58 +10001126 if (iomap->flags & IOMAP_F_MERGED)
1127 flags |= FIEMAP_EXTENT_MERGED;
Darrick J. Wonge43c4602016-09-19 10:13:02 +10001128 if (iomap->flags & IOMAP_F_SHARED)
1129 flags |= FIEMAP_EXTENT_SHARED;
Christoph Hellwig17de0a92016-08-29 11:33:58 +10001130
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001131 return fiemap_fill_next_extent(fi, iomap->offset,
Andreas Gruenbacher19fe5f62017-10-01 17:55:54 -04001132 iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
Christoph Hellwig17de0a92016-08-29 11:33:58 +10001133 iomap->length, flags);
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001134}
1135
1136static loff_t
1137iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1138 struct iomap *iomap)
1139{
1140 struct fiemap_ctx *ctx = data;
1141 loff_t ret = length;
1142
1143 if (iomap->type == IOMAP_HOLE)
1144 return length;
1145
1146 ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
1147 ctx->prev = *iomap;
1148 switch (ret) {
1149 case 0: /* success */
1150 return length;
1151 case 1: /* extent array full */
1152 return 0;
1153 default:
1154 return ret;
1155 }
1156}
1157
1158int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001159 loff_t start, loff_t len, const struct iomap_ops *ops)
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001160{
1161 struct fiemap_ctx ctx;
1162 loff_t ret;
1163
1164 memset(&ctx, 0, sizeof(ctx));
1165 ctx.fi = fi;
1166 ctx.prev.type = IOMAP_HOLE;
1167
1168 ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
1169 if (ret)
1170 return ret;
1171
Dave Chinner8896b8f2016-08-17 08:41:10 +10001172 if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
1173 ret = filemap_write_and_wait(inode->i_mapping);
1174 if (ret)
1175 return ret;
1176 }
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001177
1178 while (len > 0) {
Christoph Hellwigd33fd772016-10-20 15:51:28 +11001179 ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001180 iomap_fiemap_actor);
Dave Chinnerac2dc052016-08-17 08:41:34 +10001181 /* inode with no (attribute) mapping will give ENOENT */
1182 if (ret == -ENOENT)
1183 break;
Christoph Hellwig8be9f562016-06-21 09:38:45 +10001184 if (ret < 0)
1185 return ret;
1186 if (ret == 0)
1187 break;
1188
1189 start += ret;
1190 len -= ret;
1191 }
1192
1193 if (ctx.prev.type != IOMAP_HOLE) {
1194 ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
1195 if (ret < 0)
1196 return ret;
1197 }
1198
1199 return 0;
1200}
1201EXPORT_SYMBOL_GPL(iomap_fiemap);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001202
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001203/*
1204 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001205 * Returns true if found and updates @lastoff to the offset in file.
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001206 */
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001207static bool
1208page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
1209 int whence)
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001210{
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001211 const struct address_space_operations *ops = inode->i_mapping->a_ops;
1212 unsigned int bsize = i_blocksize(inode), off;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001213 bool seek_data = whence == SEEK_DATA;
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001214 loff_t poff = page_offset(page);
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001215
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001216 if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
1217 return false;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001218
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001219 if (*lastoff < poff) {
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001220 /*
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001221 * Last offset smaller than the start of the page means we found
1222 * a hole:
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001223 */
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001224 if (whence == SEEK_HOLE)
1225 return true;
1226 *lastoff = poff;
1227 }
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001228
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001229 /*
1230 * Just check the page unless we can and should check block ranges:
1231 */
1232 if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
1233 return PageUptodate(page) == seek_data;
1234
1235 lock_page(page);
1236 if (unlikely(page->mapping != inode->i_mapping))
1237 goto out_unlock_not_found;
1238
1239 for (off = 0; off < PAGE_SIZE; off += bsize) {
Andreas Gruenbacher10259de2018-08-10 11:46:14 -07001240 if (offset_in_page(*lastoff) >= off + bsize)
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001241 continue;
1242 if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
1243 unlock_page(page);
1244 return true;
1245 }
1246 *lastoff = poff + off + bsize;
1247 }
1248
1249out_unlock_not_found:
1250 unlock_page(page);
1251 return false;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001252}
1253
1254/*
1255 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
1256 *
1257 * Within unwritten extents, the page cache determines which parts are holes
Christoph Hellwigbd56b3e2018-06-01 09:05:14 -07001258 * and which are data: uptodate buffer heads count as data; everything else
1259 * counts as a hole.
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001260 *
1261 * Returns the resulting offset on successs, and -ENOENT otherwise.
1262 */
1263static loff_t
1264page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
1265 int whence)
1266{
1267 pgoff_t index = offset >> PAGE_SHIFT;
1268 pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1269 loff_t lastoff = offset;
1270 struct pagevec pvec;
1271
1272 if (length <= 0)
1273 return -ENOENT;
1274
1275 pagevec_init(&pvec);
1276
1277 do {
1278 unsigned nr_pages, i;
1279
1280 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
1281 end - 1);
1282 if (nr_pages == 0)
1283 break;
1284
1285 for (i = 0; i < nr_pages; i++) {
1286 struct page *page = pvec.pages[i];
1287
Christoph Hellwigafd9d6a2018-06-01 09:05:15 -07001288 if (page_seek_hole_data(inode, page, &lastoff, whence))
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001289 goto check_range;
Christoph Hellwig8a78cb12018-06-01 09:04:40 -07001290 lastoff = page_offset(page) + PAGE_SIZE;
1291 }
1292 pagevec_release(&pvec);
1293 } while (index < end);
1294
1295 /* When no page at lastoff and we are not done, we found a hole. */
1296 if (whence != SEEK_HOLE)
1297 goto not_found;
1298
1299check_range:
1300 if (lastoff < offset + length)
1301 goto out;
1302not_found:
1303 lastoff = -ENOENT;
1304out:
1305 pagevec_release(&pvec);
1306 return lastoff;
1307}
1308
1309
Andreas Gruenbacher0ed3b0d2017-06-29 11:43:21 -07001310static loff_t
1311iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
1312 void *data, struct iomap *iomap)
1313{
1314 switch (iomap->type) {
1315 case IOMAP_UNWRITTEN:
1316 offset = page_cache_seek_hole_data(inode, offset, length,
1317 SEEK_HOLE);
1318 if (offset < 0)
1319 return length;
1320 /* fall through */
1321 case IOMAP_HOLE:
1322 *(loff_t *)data = offset;
1323 return 0;
1324 default:
1325 return length;
1326 }
1327}
1328
1329loff_t
1330iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1331{
1332 loff_t size = i_size_read(inode);
1333 loff_t length = size - offset;
1334 loff_t ret;
1335
Darrick J. Wongd6ab17f2017-07-12 10:26:47 -07001336 /* Nothing to be found before or beyond the end of the file. */
1337 if (offset < 0 || offset >= size)
Andreas Gruenbacher0ed3b0d2017-06-29 11:43:21 -07001338 return -ENXIO;
1339
1340 while (length > 0) {
1341 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1342 &offset, iomap_seek_hole_actor);
1343 if (ret < 0)
1344 return ret;
1345 if (ret == 0)
1346 break;
1347
1348 offset += ret;
1349 length -= ret;
1350 }
1351
1352 return offset;
1353}
1354EXPORT_SYMBOL_GPL(iomap_seek_hole);
1355
1356static loff_t
1357iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
1358 void *data, struct iomap *iomap)
1359{
1360 switch (iomap->type) {
1361 case IOMAP_HOLE:
1362 return length;
1363 case IOMAP_UNWRITTEN:
1364 offset = page_cache_seek_hole_data(inode, offset, length,
1365 SEEK_DATA);
1366 if (offset < 0)
1367 return length;
1368 /*FALLTHRU*/
1369 default:
1370 *(loff_t *)data = offset;
1371 return 0;
1372 }
1373}
1374
1375loff_t
1376iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1377{
1378 loff_t size = i_size_read(inode);
1379 loff_t length = size - offset;
1380 loff_t ret;
1381
Darrick J. Wongd6ab17f2017-07-12 10:26:47 -07001382 /* Nothing to be found before or beyond the end of the file. */
1383 if (offset < 0 || offset >= size)
Andreas Gruenbacher0ed3b0d2017-06-29 11:43:21 -07001384 return -ENXIO;
1385
1386 while (length > 0) {
1387 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1388 &offset, iomap_seek_data_actor);
1389 if (ret < 0)
1390 return ret;
1391 if (ret == 0)
1392 break;
1393
1394 offset += ret;
1395 length -= ret;
1396 }
1397
1398 if (length <= 0)
1399 return -ENXIO;
1400 return offset;
1401}
1402EXPORT_SYMBOL_GPL(iomap_seek_data);
1403
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001404/*
1405 * Private flags for iomap_dio, must not overlap with the public ones in
1406 * iomap.h:
1407 */
Dave Chinner3460cac2018-05-02 12:54:53 -07001408#define IOMAP_DIO_WRITE_FUA (1 << 28)
Dave Chinner4f8ff442018-05-02 12:54:52 -07001409#define IOMAP_DIO_NEED_SYNC (1 << 29)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001410#define IOMAP_DIO_WRITE (1 << 30)
1411#define IOMAP_DIO_DIRTY (1 << 31)
1412
1413struct iomap_dio {
1414 struct kiocb *iocb;
1415 iomap_dio_end_io_t *end_io;
1416 loff_t i_size;
1417 loff_t size;
1418 atomic_t ref;
1419 unsigned flags;
1420 int error;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001421 bool wait_for_completion;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001422
1423 union {
1424 /* used during submission and for synchronous completion: */
1425 struct {
1426 struct iov_iter *iter;
1427 struct task_struct *waiter;
1428 struct request_queue *last_queue;
1429 blk_qc_t cookie;
1430 } submit;
1431
1432 /* used for aio completion: */
1433 struct {
1434 struct work_struct work;
1435 } aio;
1436 };
1437};
1438
1439static ssize_t iomap_dio_complete(struct iomap_dio *dio)
1440{
1441 struct kiocb *iocb = dio->iocb;
Lukas Czerner332391a2017-09-21 08:16:29 -06001442 struct inode *inode = file_inode(iocb->ki_filp);
Eryu Guan5e25c262017-10-13 09:47:46 -07001443 loff_t offset = iocb->ki_pos;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001444 ssize_t ret;
1445
1446 if (dio->end_io) {
1447 ret = dio->end_io(iocb,
1448 dio->error ? dio->error : dio->size,
1449 dio->flags);
1450 } else {
1451 ret = dio->error;
1452 }
1453
1454 if (likely(!ret)) {
1455 ret = dio->size;
1456 /* check for short read */
Eryu Guan5e25c262017-10-13 09:47:46 -07001457 if (offset + ret > dio->i_size &&
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001458 !(dio->flags & IOMAP_DIO_WRITE))
Eryu Guan5e25c262017-10-13 09:47:46 -07001459 ret = dio->i_size - offset;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001460 iocb->ki_pos += ret;
1461 }
1462
Eryu Guan5e25c262017-10-13 09:47:46 -07001463 /*
1464 * Try again to invalidate clean pages which might have been cached by
1465 * non-direct readahead, or faulted in by get_user_pages() if the source
1466 * of the write was an mmap'ed region of the file we're writing. Either
1467 * one is a pretty crazy thing to do, so we don't support it 100%. If
1468 * this invalidation fails, tough, the write still worked...
1469 *
1470 * And this page cache invalidation has to be after dio->end_io(), as
1471 * some filesystems convert unwritten extents to real allocations in
1472 * end_io() when necessary, otherwise a racing buffer read would cache
1473 * zeros from unwritten extents.
1474 */
1475 if (!dio->error &&
1476 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
1477 int err;
1478 err = invalidate_inode_pages2_range(inode->i_mapping,
1479 offset >> PAGE_SHIFT,
1480 (offset + dio->size - 1) >> PAGE_SHIFT);
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001481 if (err)
1482 dio_warn_stale_pagecache(iocb->ki_filp);
Eryu Guan5e25c262017-10-13 09:47:46 -07001483 }
1484
Dave Chinner4f8ff442018-05-02 12:54:52 -07001485 /*
1486 * If this is a DSYNC write, make sure we push it to stable storage now
1487 * that we've written data.
1488 */
1489 if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
1490 ret = generic_write_sync(iocb, ret);
1491
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001492 inode_dio_end(file_inode(iocb->ki_filp));
1493 kfree(dio);
1494
1495 return ret;
1496}
1497
1498static void iomap_dio_complete_work(struct work_struct *work)
1499{
1500 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
1501 struct kiocb *iocb = dio->iocb;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001502
Dave Chinner4f8ff442018-05-02 12:54:52 -07001503 iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001504}
1505
1506/*
1507 * Set an error in the dio if none is set yet. We have to use cmpxchg
1508 * as the submission context and the completion context(s) can race to
1509 * update the error.
1510 */
1511static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
1512{
1513 cmpxchg(&dio->error, 0, ret);
1514}
1515
1516static void iomap_dio_bio_end_io(struct bio *bio)
1517{
1518 struct iomap_dio *dio = bio->bi_private;
1519 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
1520
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001521 if (bio->bi_status)
1522 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001523
1524 if (atomic_dec_and_test(&dio->ref)) {
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001525 if (dio->wait_for_completion) {
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001526 struct task_struct *waiter = dio->submit.waiter;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001527 WRITE_ONCE(dio->submit.waiter, NULL);
1528 wake_up_process(waiter);
1529 } else if (dio->flags & IOMAP_DIO_WRITE) {
1530 struct inode *inode = file_inode(dio->iocb->ki_filp);
1531
1532 INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
1533 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
1534 } else {
1535 iomap_dio_complete_work(&dio->aio.work);
1536 }
1537 }
1538
1539 if (should_dirty) {
1540 bio_check_pages_dirty(bio);
1541 } else {
1542 struct bio_vec *bvec;
1543 int i;
1544
1545 bio_for_each_segment_all(bvec, bio, i)
1546 put_page(bvec->bv_page);
1547 bio_put(bio);
1548 }
1549}
1550
1551static blk_qc_t
1552iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
1553 unsigned len)
1554{
1555 struct page *page = ZERO_PAGE(0);
Jens Axboed1e36282018-08-29 10:36:56 -06001556 int flags = REQ_SYNC | REQ_IDLE;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001557 struct bio *bio;
1558
1559 bio = bio_alloc(GFP_KERNEL, 1);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001560 bio_set_dev(bio, iomap->bdev);
Christoph Hellwig57fc5052018-06-01 09:03:08 -07001561 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001562 bio->bi_private = dio;
1563 bio->bi_end_io = iomap_dio_bio_end_io;
1564
Jens Axboed1e36282018-08-29 10:36:56 -06001565 if (dio->iocb->ki_flags & IOCB_HIPRI)
1566 flags |= REQ_HIPRI;
1567
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001568 get_page(page);
Christoph Hellwig6533b4e2018-06-01 09:03:07 -07001569 __bio_add_page(bio, page, len, 0);
Jens Axboed1e36282018-08-29 10:36:56 -06001570 bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001571
1572 atomic_inc(&dio->ref);
1573 return submit_bio(bio);
1574}
1575
1576static loff_t
Christoph Hellwig09230432018-07-03 09:07:46 -07001577iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
1578 struct iomap_dio *dio, struct iomap *iomap)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001579{
Fabian Frederick93407472017-02-27 14:28:32 -08001580 unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
1581 unsigned int fs_block_size = i_blocksize(inode), pad;
1582 unsigned int align = iov_iter_alignment(dio->submit.iter);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001583 struct iov_iter iter;
1584 struct bio *bio;
1585 bool need_zeroout = false;
Dave Chinner3460cac2018-05-02 12:54:53 -07001586 bool use_fua = false;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001587 int nr_pages, ret;
Al Virocfe057f2017-09-11 21:17:09 +01001588 size_t copied = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001589
1590 if ((pos | length | align) & ((1 << blkbits) - 1))
1591 return -EINVAL;
1592
Christoph Hellwig09230432018-07-03 09:07:46 -07001593 if (iomap->type == IOMAP_UNWRITTEN) {
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001594 dio->flags |= IOMAP_DIO_UNWRITTEN;
1595 need_zeroout = true;
Christoph Hellwig09230432018-07-03 09:07:46 -07001596 }
1597
1598 if (iomap->flags & IOMAP_F_SHARED)
1599 dio->flags |= IOMAP_DIO_COW;
1600
1601 if (iomap->flags & IOMAP_F_NEW) {
1602 need_zeroout = true;
1603 } else {
1604 /*
1605 * Use a FUA write if we need datasync semantics, this
1606 * is a pure data IO that doesn't require any metadata
1607 * updates and the underlying device supports FUA. This
1608 * allows us to avoid cache flushes on IO completion.
1609 */
1610 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
1611 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
1612 blk_queue_fua(bdev_get_queue(iomap->bdev)))
1613 use_fua = true;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001614 }
1615
1616 /*
1617 * Operate on a partial iter trimmed to the extent we were called for.
1618 * We'll update the iter in the dio once we're done with this extent.
1619 */
1620 iter = *dio->submit.iter;
1621 iov_iter_truncate(&iter, length);
1622
1623 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1624 if (nr_pages <= 0)
1625 return nr_pages;
1626
1627 if (need_zeroout) {
1628 /* zero out from the start of the block to the write offset */
1629 pad = pos & (fs_block_size - 1);
1630 if (pad)
1631 iomap_dio_zero(dio, iomap, pos - pad, pad);
1632 }
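	/*
	 * For example, with 4096-byte fs blocks a 512-byte write at offset
	 * 5120 zeroes bytes 4096..5119 here, submits the data for bytes
	 * 5120..5631 in the loop below, and zeroes bytes 5632..8191 in the
	 * tail zeroing after the loop.
	 */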
1633
1634 do {
Al Virocfe057f2017-09-11 21:17:09 +01001635 size_t n;
1636 if (dio->error) {
1637 iov_iter_revert(dio->submit.iter, copied);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001638 return 0;
Al Virocfe057f2017-09-11 21:17:09 +01001639 }
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001640
1641 bio = bio_alloc(GFP_KERNEL, nr_pages);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001642 bio_set_dev(bio, iomap->bdev);
Christoph Hellwig57fc5052018-06-01 09:03:08 -07001643 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
Jens Axboe45d06cf2017-06-27 11:01:22 -06001644 bio->bi_write_hint = dio->iocb->ki_hint;
Adam Manzanares087e5662018-05-22 10:52:21 -07001645 bio->bi_ioprio = dio->iocb->ki_ioprio;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001646 bio->bi_private = dio;
1647 bio->bi_end_io = iomap_dio_bio_end_io;
1648
1649 ret = bio_iov_iter_get_pages(bio, &iter);
1650 if (unlikely(ret)) {
1651 bio_put(bio);
Al Virocfe057f2017-09-11 21:17:09 +01001652 return copied ? copied : ret;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001653 }
1654
Al Virocfe057f2017-09-11 21:17:09 +01001655 n = bio->bi_iter.bi_size;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001656 if (dio->flags & IOMAP_DIO_WRITE) {
Dave Chinner3460cac2018-05-02 12:54:53 -07001657 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
1658 if (use_fua)
1659 bio->bi_opf |= REQ_FUA;
1660 else
1661 dio->flags &= ~IOMAP_DIO_WRITE_FUA;
Al Virocfe057f2017-09-11 21:17:09 +01001662 task_io_account_write(n);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001663 } else {
Dave Chinner3460cac2018-05-02 12:54:53 -07001664 bio->bi_opf = REQ_OP_READ;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001665 if (dio->flags & IOMAP_DIO_DIRTY)
1666 bio_set_pages_dirty(bio);
1667 }
1668
Jens Axboed1e36282018-08-29 10:36:56 -06001669 if (dio->iocb->ki_flags & IOCB_HIPRI)
1670 bio->bi_opf |= REQ_HIPRI;
1671
Al Virocfe057f2017-09-11 21:17:09 +01001672 iov_iter_advance(dio->submit.iter, n);
1673
1674 dio->size += n;
1675 pos += n;
1676 copied += n;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001677
1678 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1679
1680 atomic_inc(&dio->ref);
1681
1682 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
1683 dio->submit.cookie = submit_bio(bio);
1684 } while (nr_pages);
1685
1686 if (need_zeroout) {
1687 /* zero out from the end of the write to the end of the block */
1688 pad = pos & (fs_block_size - 1);
1689 if (pad)
1690 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
1691 }
Al Virocfe057f2017-09-11 21:17:09 +01001692 return copied;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001693}
1694
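/*
 * Direct I/O reads that land in a hole, or reads over unwritten extents,
 * never touch the device; we simply zero-fill the user buffer for the
 * length of this mapping.
 */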
Christoph Hellwig09230432018-07-03 09:07:46 -07001695static loff_t
1696iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
1697{
1698 length = iov_iter_zero(length, dio->submit.iter);
1699 dio->size += length;
1700 return length;
1701}
1702
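/*
 * Direct I/O to an IOMAP_INLINE extent is served straight from the in-core
 * inline data buffer.  A write extends i_size when it grows the file and
 * dirties the inode so the new contents get written back with the inode.
 */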
1703static loff_t
Andreas Gruenbacherec181f62018-07-03 09:07:47 -07001704iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
1705 struct iomap_dio *dio, struct iomap *iomap)
1706{
1707 struct iov_iter *iter = dio->submit.iter;
1708 size_t copied;
1709
1710 BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
1711
1712 if (dio->flags & IOMAP_DIO_WRITE) {
1713 loff_t size = inode->i_size;
1714
1715 if (pos > size)
1716 memset(iomap->inline_data + size, 0, pos - size);
1717 copied = copy_from_iter(iomap->inline_data + pos, length, iter);
1718 if (copied) {
1719 if (pos + copied > size)
1720 i_size_write(inode, pos + copied);
1721 mark_inode_dirty(inode);
1722 }
1723 } else {
1724 copied = copy_to_iter(iomap->inline_data + pos, length, iter);
1725 }
1726 dio->size += copied;
1727 return copied;
1728}
1729
1730static loff_t
Christoph Hellwig09230432018-07-03 09:07:46 -07001731iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
1732 void *data, struct iomap *iomap)
1733{
1734 struct iomap_dio *dio = data;
1735
1736 switch (iomap->type) {
1737 case IOMAP_HOLE:
1738 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
1739 return -EIO;
1740 return iomap_dio_hole_actor(length, dio);
1741 case IOMAP_UNWRITTEN:
1742 if (!(dio->flags & IOMAP_DIO_WRITE))
1743 return iomap_dio_hole_actor(length, dio);
1744 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
1745 case IOMAP_MAPPED:
1746 return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
Andreas Gruenbacherec181f62018-07-03 09:07:47 -07001747 case IOMAP_INLINE:
1748 return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
Christoph Hellwig09230432018-07-03 09:07:46 -07001749 default:
1750 WARN_ON_ONCE(1);
1751 return -EIO;
1752 }
1753}
1754
Dave Chinner4f8ff442018-05-02 12:54:52 -07001755/*
1756 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
Dave Chinner3460cac2018-05-02 12:54:53 -07001757 * is being issued as AIO or not. This allows us to optimise pure data writes
1758 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
1759 * REQ_FLUSH post write. This is slightly tricky because a single request here
1760 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
1761 * may be pure data writes. In that case, we still need to do a full data sync
1762 * completion.
Dave Chinner4f8ff442018-05-02 12:54:52 -07001763 */
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001764ssize_t
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001765iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1766 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001767{
1768 struct address_space *mapping = iocb->ki_filp->f_mapping;
1769 struct inode *inode = file_inode(iocb->ki_filp);
1770 size_t count = iov_iter_count(iter);
Eryu Guanc771c142017-03-02 15:02:06 -08001771 loff_t pos = iocb->ki_pos, start = pos;
1772 loff_t end = iocb->ki_pos + count - 1, ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001773 unsigned int flags = IOMAP_DIRECT;
1774 struct blk_plug plug;
1775 struct iomap_dio *dio;
1776
1777 lockdep_assert_held(&inode->i_rwsem);
1778
1779 if (!count)
1780 return 0;
1781
1782 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1783 if (!dio)
1784 return -ENOMEM;
1785
1786 dio->iocb = iocb;
1787 atomic_set(&dio->ref, 1);
1788 dio->size = 0;
1789 dio->i_size = i_size_read(inode);
1790 dio->end_io = end_io;
1791 dio->error = 0;
1792 dio->flags = 0;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001793 dio->wait_for_completion = is_sync_kiocb(iocb);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001794
1795 dio->submit.iter = iter;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001796 dio->submit.waiter = current;
1797 dio->submit.cookie = BLK_QC_T_NONE;
1798 dio->submit.last_queue = NULL;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001799
1800 if (iov_iter_rw(iter) == READ) {
1801 if (pos >= dio->i_size)
1802 goto out_free_dio;
1803
David Howells00e23702018-10-22 13:07:28 +01001804 if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001805 dio->flags |= IOMAP_DIO_DIRTY;
1806 } else {
Dave Chinner3460cac2018-05-02 12:54:53 -07001807 flags |= IOMAP_WRITE;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001808 dio->flags |= IOMAP_DIO_WRITE;
Dave Chinner3460cac2018-05-02 12:54:53 -07001809
1810 /* for data sync or sync, we need sync completion processing */
Dave Chinner4f8ff442018-05-02 12:54:52 -07001811 if (iocb->ki_flags & IOCB_DSYNC)
1812 dio->flags |= IOMAP_DIO_NEED_SYNC;
Dave Chinner3460cac2018-05-02 12:54:53 -07001813
1814 /*
1815 * For datasync only writes, we optimistically try using FUA for
1816 * this IO. Any non-FUA write that occurs will clear this flag,
1817 * hence we know before completion whether a cache flush is
1818 * necessary.
1819 */
1820 if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
1821 dio->flags |= IOMAP_DIO_WRITE_FUA;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001822 }
1823
Goldwyn Rodriguesa38d1242017-06-20 07:05:45 -05001824 if (iocb->ki_flags & IOCB_NOWAIT) {
1825 if (filemap_range_has_page(mapping, start, end)) {
1826 ret = -EAGAIN;
1827 goto out_free_dio;
1828 }
1829 flags |= IOMAP_NOWAIT;
1830 }
1831
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001832 ret = filemap_write_and_wait_range(mapping, start, end);
1833 if (ret)
1834 goto out_free_dio;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001835
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001836 /*
1837 * Try to invalidate cache pages for the range we're direct
1838 * writing. If this invalidation fails, tough, the write will
1839 * still work, but racing two incompatible write paths is a
1840 * pretty crazy thing to do, so we don't support it 100%.
1841 */
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001842 ret = invalidate_inode_pages2_range(mapping,
1843 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001844 if (ret)
1845 dio_warn_stale_pagecache(iocb->ki_filp);
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001846 ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001847
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001848 if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
Chandan Rajendra546e7be2017-09-22 11:47:33 -07001849 !inode->i_sb->s_dio_done_wq) {
1850 ret = sb_init_dio_done_wq(inode->i_sb);
1851 if (ret < 0)
1852 goto out_free_dio;
1853 }
1854
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001855 inode_dio_begin(inode);
1856
1857 blk_start_plug(&plug);
1858 do {
1859 ret = iomap_apply(inode, pos, count, flags, ops, dio,
1860 iomap_dio_actor);
1861 if (ret <= 0) {
1862 /* magic error code to fall back to buffered I/O */
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001863 if (ret == -ENOTBLK) {
1864 dio->wait_for_completion = true;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001865 ret = 0;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001866 }
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001867 break;
1868 }
1869 pos += ret;
Chandan Rajendraa008c312017-04-12 11:03:20 -07001870
1871 if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
1872 break;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001873 } while ((count = iov_iter_count(iter)) > 0);
1874 blk_finish_plug(&plug);
1875
1876 if (ret < 0)
1877 iomap_dio_set_error(dio, ret);
1878
Dave Chinner3460cac2018-05-02 12:54:53 -07001879 /*
1880 * If all the writes we issued were FUA, we don't need to flush the
1881 * cache on IO completion. Clear the sync flag for this case.
1882 */
1883 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1884 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1885
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001886 if (!atomic_dec_and_test(&dio->ref)) {
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001887 if (!dio->wait_for_completion)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001888 return -EIOCBQUEUED;
1889
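		/*
		 * The bio completion side clears dio->submit.waiter and wakes
		 * this task.  For IOCB_HIPRI requests we poll the last queue
		 * we submitted to via blk_poll(); otherwise, or when polling
		 * turns up nothing, we sleep in io_schedule().
		 */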
1890 for (;;) {
1891 set_current_state(TASK_UNINTERRUPTIBLE);
1892 if (!READ_ONCE(dio->submit.waiter))
1893 break;
1894
1895 if (!(iocb->ki_flags & IOCB_HIPRI) ||
1896 !dio->submit.last_queue ||
Christoph Hellwigea435e12017-11-02 21:29:54 +03001897 !blk_poll(dio->submit.last_queue,
Linus Torvalds5cc60ae2016-12-14 21:35:31 -08001898 dio->submit.cookie))
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001899 io_schedule();
1900 }
1901 __set_current_state(TASK_RUNNING);
1902 }
1903
Eryu Guanc771c142017-03-02 15:02:06 -08001904 ret = iomap_dio_complete(dio);
1905
Eryu Guanc771c142017-03-02 15:02:06 -08001906 return ret;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001907
1908out_free_dio:
1909 kfree(dio);
1910 return ret;
1911}
1912EXPORT_SYMBOL_GPL(iomap_dio_rw);
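
/*
 * Example sketch of a caller: a filesystem typically invokes iomap_dio_rw()
 * from its ->read_iter/->write_iter methods with the inode's i_rwsem held.
 * The "myfs" names and myfs_iomap_ops below are hypothetical stand-ins for
 * the filesystem's own iomap_begin/iomap_end implementation, not part of
 * this file.
 */
extern const struct iomap_ops myfs_iomap_ops;	/* provided by the fs */

static ssize_t myfs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);	/* iomap_dio_rw() asserts i_rwsem is held */
	ret = iomap_dio_rw(iocb, from, &myfs_iomap_ops, NULL);
	inode_unlock(inode);
	return ret;
}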
Darrick J. Wong67482122018-05-10 08:38:15 -07001913
1914/* Swapfile activation */
1915
1916#ifdef CONFIG_SWAP
1917struct iomap_swapfile_info {
1918 struct iomap iomap; /* accumulated iomap */
1919 struct swap_info_struct *sis;
1920 uint64_t lowest_ppage; /* lowest physical addr seen (pages) */
1921 uint64_t highest_ppage; /* highest physical addr seen (pages) */
1922 unsigned long nr_pages; /* number of pages collected */
1923 int nr_extents; /* extent count */
1924};
1925
1926/*
1927 * Collect physical extents for this swap file. Physical extents reported to
1928 * the swap code must be trimmed to align to a page boundary. The logical
1929 * offset within the file is irrelevant since the swapfile code maps logical
1930 * page numbers of the swap device to the physical page-aligned extents.
1931 */
1932static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
1933{
1934 struct iomap *iomap = &isi->iomap;
1935 unsigned long nr_pages;
1936 uint64_t first_ppage;
1937 uint64_t first_ppage_reported;
1938 uint64_t next_ppage;
1939 int error;
1940
1941 /*
1942 * Round the start up and the end down so that the physical
1943 * extent aligns to a page boundary.
1944 */
1945 first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
1946 next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
1947 PAGE_SHIFT;
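	/*
	 * For example, with 4096-byte pages an extent covering bytes
	 * [12800, 33280) rounds in to first_ppage = 4 and next_ppage = 8,
	 * i.e. four whole pages.
	 */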
1948
1949 /* Skip too-short physical extents. */
1950 if (first_ppage >= next_ppage)
1951 return 0;
1952 nr_pages = next_ppage - first_ppage;
1953
1954 /*
1955 * Calculate how much swap space we're adding; the first page contains
1956 * the swap header and doesn't count. The mm still wants that first
1957 * page fed to add_swap_extent, however.
1958 */
1959 first_ppage_reported = first_ppage;
1960 if (iomap->offset == 0)
1961 first_ppage_reported++;
1962 if (isi->lowest_ppage > first_ppage_reported)
1963 isi->lowest_ppage = first_ppage_reported;
1964 if (isi->highest_ppage < (next_ppage - 1))
1965 isi->highest_ppage = next_ppage - 1;
1966
1967 /* Add extent, set up for the next call. */
1968 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
1969 if (error < 0)
1970 return error;
1971 isi->nr_extents += error;
1972 isi->nr_pages += nr_pages;
1973 return 0;
1974}
1975
1976/*
1977 * Accumulate iomaps for this swap file. We have to accumulate iomaps because
1978 * swap only cares about contiguous page-aligned physical extents and makes no
1979 * distinction between written and unwritten extents.
1980 */
1981static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
1982 loff_t count, void *data, struct iomap *iomap)
1983{
1984 struct iomap_swapfile_info *isi = data;
1985 int error;
1986
Christoph Hellwig19319b52018-06-01 09:03:06 -07001987 switch (iomap->type) {
1988 case IOMAP_MAPPED:
1989 case IOMAP_UNWRITTEN:
1990 /* Only real or unwritten extents. */
1991 break;
1992 case IOMAP_INLINE:
1993 /* No inline data. */
Omar Sandovalec601922018-05-16 11:13:34 -07001994 pr_err("swapon: file is inline\n");
1995 return -EINVAL;
Christoph Hellwig19319b52018-06-01 09:03:06 -07001996 default:
Omar Sandovalec601922018-05-16 11:13:34 -07001997 pr_err("swapon: file has unallocated extents\n");
1998 return -EINVAL;
1999 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002000
Omar Sandovalec601922018-05-16 11:13:34 -07002001 /* No uncommitted metadata or shared blocks. */
2002 if (iomap->flags & IOMAP_F_DIRTY) {
2003 pr_err("swapon: file is not committed\n");
2004 return -EINVAL;
2005 }
2006 if (iomap->flags & IOMAP_F_SHARED) {
2007 pr_err("swapon: file has shared extents\n");
2008 return -EINVAL;
2009 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002010
Omar Sandovalec601922018-05-16 11:13:34 -07002011 /* Only one bdev per swap file. */
2012 if (iomap->bdev != isi->sis->bdev) {
2013 pr_err("swapon: file is on multiple devices\n");
2014 return -EINVAL;
2015 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002016
2017 if (isi->iomap.length == 0) {
2018 /* No accumulated extent, so just store it. */
2019 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2020 } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
2021 /* Append this to the accumulated extent. */
2022 isi->iomap.length += iomap->length;
2023 } else {
2024 /* Otherwise, add the retained iomap and store this one. */
2025 error = iomap_swapfile_add_extent(isi);
2026 if (error)
2027 return error;
2028 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
2029 }
Darrick J. Wong67482122018-05-10 08:38:15 -07002030 return count;
Darrick J. Wong67482122018-05-10 08:38:15 -07002031}
2032
2033/*
2034 * Iterate a swap file's iomaps to construct physical extents that can be
2035 * passed to the swapfile subsystem.
2036 */
2037int iomap_swapfile_activate(struct swap_info_struct *sis,
2038 struct file *swap_file, sector_t *pagespan,
2039 const struct iomap_ops *ops)
2040{
2041 struct iomap_swapfile_info isi = {
2042 .sis = sis,
2043 .lowest_ppage = (sector_t)-1ULL,
2044 };
2045 struct address_space *mapping = swap_file->f_mapping;
2046 struct inode *inode = mapping->host;
2047 loff_t pos = 0;
2048 loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
2049 loff_t ret;
2050
Darrick J. Wong117a1482018-06-05 09:53:05 -07002051 /*
2052 * Persist all file mapping metadata so that we won't have any
2053 * IOMAP_F_DIRTY iomaps.
2054 */
2055 ret = vfs_fsync(swap_file, 1);
Darrick J. Wong67482122018-05-10 08:38:15 -07002056 if (ret)
2057 return ret;
2058
2059 while (len > 0) {
2060 ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
2061 ops, &isi, iomap_swapfile_activate_actor);
2062 if (ret <= 0)
2063 return ret;
2064
2065 pos += ret;
2066 len -= ret;
2067 }
2068
2069 if (isi.iomap.length) {
2070 ret = iomap_swapfile_add_extent(&isi);
2071 if (ret)
2072 return ret;
2073 }
2074
2075 *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
2076 sis->max = isi.nr_pages;
2077 sis->pages = isi.nr_pages - 1;
2078 sis->highest_bit = isi.nr_pages - 1;
2079 return isi.nr_extents;
2080}
2081EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
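
/*
 * Example sketch: wiring swapfile activation up to the filesystem's
 * ->swap_activate() address_space operation ("myfs" names and
 * myfs_iomap_ops are hypothetical, as above).
 */
static int myfs_swap_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *span)
{
	return iomap_swapfile_activate(sis, swap_file, span, &myfs_iomap_ops);
}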
2082#endif /* CONFIG_SWAP */
Christoph Hellwig89eb1902018-06-01 09:03:08 -07002083
2084static loff_t
2085iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
2086 void *data, struct iomap *iomap)
2087{
2088 sector_t *bno = data, addr;
2089
2090 if (iomap->type == IOMAP_MAPPED) {
2091 addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
2092 if (addr > INT_MAX)
2093 WARN(1, "would truncate bmap result\n");
2094 else
2095 *bno = addr;
2096 }
2097 return 0;
2098}
2099
2100/* legacy ->bmap interface. 0 is the error return (!) */
2101sector_t
2102iomap_bmap(struct address_space *mapping, sector_t bno,
2103 const struct iomap_ops *ops)
2104{
2105 struct inode *inode = mapping->host;
Eric Sandeen79b3dbe2018-08-02 13:09:27 -07002106 loff_t pos = bno << inode->i_blkbits;
Christoph Hellwig89eb1902018-06-01 09:03:08 -07002107 unsigned blocksize = i_blocksize(inode);
2108
2109 if (filemap_write_and_wait(mapping))
2110 return 0;
2111
2112 bno = 0;
2113 iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
2114 return bno;
2115}
2116EXPORT_SYMBOL_GPL(iomap_bmap);
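
/*
 * Example sketch: hooking iomap_bmap() up to the legacy ->bmap()
 * address_space operation ("myfs" names are hypothetical, as above).
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &myfs_iomap_ops);
}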