/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}

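/*
 * A failed or short write may have instantiated page cache pages beyond
 * the old EOF; truncate them away again so we don't leave around pages
 * that were never successfully written.
 */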
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

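/*
 * Grab and lock the page cache page backing @pos and prepare it for a
 * write of @len bytes using the block mapping in @iomap.  On error the
 * partially set up page cache state is torn down via iomap_write_failed().
 */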
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

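/*
 * Commit @copied bytes to the page, unlock it and drop our reference.
 * If less than @len bytes could be committed, clean up the rest via
 * iomap_write_failed().
 */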
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

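/*
 * The actor for buffered writes: walk the mapped range one page cache page
 * at a time, copying data in from the iov_iter passed in via @data.
 */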
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

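/*
 * Buffered write entry point.  A filesystem would typically call this from
 * its ->write_iter() method with the inode lock held, roughly like the
 * following sketch (illustrative only, myfs_iomap_ops is a made-up name):
 *
 *	inode_lock(inode);
 *	ret = generic_write_checks(iocb, from);
 *	if (ret > 0)
 *		ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
 *	inode_unlock(inode);
 */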
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

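/*
 * The actor for iomap_file_dirty(): read each page through the page cache,
 * then run it through write_begin/write_end unchanged so that it is marked
 * dirty against the current mapping.
 */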
static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
				AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
				&page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

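/*
 * Walk the given range and redirty every page cache page in it, so the
 * data gets written back out again against the current block mapping.
 */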
int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes,
			AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = iomap->blkno +
		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed? we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(unsigned, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

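/*
 * Zero a range of the file through the page cache, or directly on DAX
 * inodes.  Holes and unwritten extents already read back as zeroes and are
 * skipped; *did_zero, if supplied, reports whether any blocks were
 * actually zeroed.
 */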
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

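/*
 * Zero the tail of the block containing @pos, typically used when
 * truncating a file to a size that is not block aligned.
 */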
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		struct iomap_ops *ops)
{
	unsigned blocksize = (1 << inode->i_blkbits);
	unsigned off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

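/*
 * Fault handler for shared writable mappings: called from a filesystem's
 * ->page_mkwrite() method to map or allocate blocks for the faulting page
 * and mark it dirty before it becomes writable.
 */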
int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

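/*
 * Implement the FIEMAP ioctl on top of iomap_apply(): walk the requested
 * range and turn each mapping into a fiemap extent.  The most recent
 * mapping is held back in ctx.prev so that the final extent can be flagged
 * FIEMAP_EXTENT_LAST.
 */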
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

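/*
 * Finish a direct I/O request: run the optional ->end_io callback, trim
 * reads that extend past i_size, advance the iocb position and free the
 * dio structure.
 */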
static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (iocb->ki_pos + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - iocb->ki_pos;
		iocb->ki_pos += ret;
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;
	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
	ssize_t ret;

	ret = iomap_dio_complete(dio);
	if (is_write && ret > 0)
		ret = generic_write_sync(iocb, ret);
	iocb->ki_complete(iocb, ret, 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

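/*
 * Bio completion handler: record any error, and once the last reference to
 * the dio is dropped either wake the synchronous waiter or, for AIO writes,
 * punt completion to a workqueue (AIO reads complete directly here).
 */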
static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_error)
		iomap_dio_set_error(dio, bio->bi_error);

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio->bi_bdev = iomap->bdev;
	bio->bi_iter.bi_sector =
		iomap->blkno + ((pos - iomap->offset) >> 9);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

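/*
 * The direct I/O actor: build and submit bios for one mapped extent.
 * Reads from holes or unwritten extents are satisfied by zero-filling the
 * iter; writes into newly allocated or unwritten blocks zero the partial
 * blocks around the I/O so no stale data is exposed.
 */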
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned fs_block_size = (1 << inode->i_blkbits), pad;
	unsigned align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	int nr_pages, ret;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/*FALLTHRU*/
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW)
			need_zeroout = true;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		if (dio->error)
			return 0;

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio->bi_bdev = iomap->bdev;
		bio->bi_iter.bi_sector =
			iomap->blkno + ((pos - iomap->offset) >> 9);
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}

		if (dio->flags & IOMAP_DIO_WRITE) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
			task_io_account_write(bio->bi_iter.bi_size);
		} else {
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}

	iov_iter_advance(dio->submit.iter, length);
	return length;
}

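/*
 * iomap based direct I/O implementation, a replacement for the legacy
 * blockdev_direct_IO() path.  The caller must hold the inode lock; a
 * filesystem read path might use it roughly like this (illustrative
 * sketch only, myfs_iomap_ops is a made-up name):
 *
 *	inode_lock_shared(inode);
 *	ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
 *	inode_unlock_shared(inode);
 */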
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops,
		iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		dio->flags |= IOMAP_DIO_WRITE;
		flags |= IOMAP_WRITE;
	}

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
		if (ret)
			goto out_free_dio;

		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
			!inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			iomap_dio_set_error(dio, ret);
	}

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_mq_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 */
	if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);