/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and most importantly avoid the need for filesystem specific
 * locking per page.  Instead, all the operations are amortised over the entire
 * range of pages.  It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied.  This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
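
/*
 * A minimal sketch (not part of this file) of the iomap_begin contract that
 * iomap_apply() drives.  The filesystem, the names example_iomap_begin and
 * example_iomap_ops, and the extent values are hypothetical; a real
 * implementation would look up or allocate blocks and fill in the iomap
 * from its own extent map.
 */
#if 0
static int example_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	/* Describe one contiguous extent covering (at most) pos..pos+length. */
	iomap->type = IOMAP_MAPPED;
	iomap->offset = pos;			/* file offset of this extent */
	iomap->length = length;			/* may be shorter than asked */
	iomap->addr = 0;			/* disk address, in bytes (hypothetical) */
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static const struct iomap_ops example_iomap_ops = {
	.iomap_begin	= example_iomap_begin,
};
#endif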

static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
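
/*
 * Worked example (assuming 512-byte sectors): an iomap with addr == 4096
 * and offset == 8192 maps file position 12288 to disk byte 8192, i.e.
 * sector 16.
 */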

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		page_endio(bvec->bv_page, false, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	unsigned poff = pos & (PAGE_SIZE - 1);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	bool is_contig = false;
	sector_t sector;

	/* we don't support blocksize < PAGE_SIZE quite yet. */
	WARN_ON_ONCE(pos != page_offset(page));
	WARN_ON_ONCE(plen != PAGE_SIZE);

	if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
		zero_user(page, poff, plen);
		SetPageUptodate(page);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
		if (__bio_try_merge_page(ctx->bio, page, plen, poff))
			goto done;
		is_contig = true;
	}

	if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	__bio_add_page(ctx->bio, page, plen, poff);
done:
	return plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	WARN_ON_ONCE(page_has_buffers(page));

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
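
/*
 * A minimal sketch of wiring the buffered read helpers into
 * address_space_operations.  The filesystem names are hypothetical and
 * example_iomap_ops refers to the illustrative ops sketched earlier;
 * iomap_readpages() is defined below and declared in linux/iomap.h.
 */
#if 0
static int example_readpage(struct file *file, struct page *page)
{
	return iomap_readpage(page, &example_iomap_ops);
}

static int example_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return iomap_readpages(mapping, pages, nr_pages, &example_iomap_ops);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.readpages	= example_readpages,
};
#endif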

static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done.  Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && ((pos + done) & (PAGE_SIZE - 1)) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned poff = block_start & (PAGE_SIZE - 1);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, block_end - block_start);
	unsigned from = pos & (PAGE_SIZE - 1), to = from + len;

	WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE);

	if (PageUptodate(page))
		return 0;
	if (from <= poff && to >= poff + plen)
		return 0;
	return iomap_read_page_sync(inode, block_start, page,
			poff, plen, from, to, iomap);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a non
	 * uptodate page as a zero-length write, and force the caller to redo
	 * the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page))) {
		copied = 0;
	} else {
		SetPageUptodate(page);
		iomap_set_page_dirty(page);
	}
	return __generic_write_end(inode, pos, copied, page);
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	__generic_write_end(inode, pos, copied, page);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = generic_write_end(NULL, inode->i_mapping, pos, len,
				copied, page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	if (iomap->page_done)
		iomap->page_done(inode, pos, copied, page, iomap);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
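
/*
 * A minimal sketch of a ->write_iter driving iomap_file_buffered_write().
 * The filesystem and example_iomap_ops are hypothetical; real callers also
 * handle O_DIRECT, permission/limit checks, timestamps, and so on.
 */
#if 0
static ssize_t example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif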

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE);
	}

	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

/*
 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
 * Returns true if found and updates @lastoff to the offset in file.
 */
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
		int whence)
{
	const struct address_space_operations *ops = inode->i_mapping->a_ops;
	unsigned int bsize = i_blocksize(inode), off;
	bool seek_data = whence == SEEK_DATA;
	loff_t poff = page_offset(page);

	if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
		return false;

	if (*lastoff < poff) {
		/*
		 * Last offset smaller than the start of the page means we found
		 * a hole:
		 */
		if (whence == SEEK_HOLE)
			return true;
		*lastoff = poff;
	}

	/*
	 * Just check the page unless we can and should check block ranges:
	 */
	if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
		return PageUptodate(page) == seek_data;

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping))
		goto out_unlock_not_found;

	for (off = 0; off < PAGE_SIZE; off += bsize) {
		if ((*lastoff & ~PAGE_MASK) >= off + bsize)
			continue;
		if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
			unlock_page(page);
			return true;
		}
		*lastoff = poff + off + bsize;
	}

out_unlock_not_found:
	unlock_page(page);
	return false;
}

/*
 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 *
 * Within unwritten extents, the page cache determines which parts are holes
 * and which are data: uptodate buffer heads count as data; everything else
 * counts as a hole.
 *
 * Returns the resulting offset on success, and -ENOENT otherwise.
 */
static loff_t
page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		int whence)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	loff_t lastoff = offset;
	struct pagevec pvec;

	if (length <= 0)
		return -ENOENT;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
						end - 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page_seek_hole_data(inode, page, &lastoff, whence))
				goto check_range;
			lastoff = page_offset(page) + PAGE_SIZE;
		}
		pagevec_release(&pvec);
	} while (index < end);

	/* When no page at lastoff and we are not done, we found a hole. */
	if (whence != SEEK_HOLE)
		goto not_found;

check_range:
	if (lastoff < offset + length)
		goto out;
not_found:
	lastoff = -ENOENT;
out:
	pagevec_release(&pvec);
	return lastoff;
}

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);
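
/*
 * A minimal sketch of a ->llseek implementation using iomap_seek_hole()
 * and iomap_seek_data() (defined below).  The filesystem and
 * example_iomap_ops are hypothetical; real callers typically take the
 * inode lock shared around the iomap calls as well.
 */
#if 0
static loff_t example_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &example_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &example_iomap_ops);
		break;
	default:
		return generic_file_llseek(file, offset, whence);
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
#endif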

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else {
		/*
		 * Use a FUA write if we need datasync semantics, this
		 * is a pure data IO that doesn't require any metadata
		 * updates and the underlying device supports FUA. This
		 * allows us to avoid cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return copied ? copied : ret;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied;
}

static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
Dave Chinner4f8ff442018-05-02 12:54:52 -07001476/*
1477 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
Dave Chinner3460cac2018-05-02 12:54:53 -07001478 * is being issued as AIO or not. This allows us to optimise pure data writes
1479 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
1480 * REQ_FLUSH post write. This is slightly tricky because a single request here
1481 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
1482 * may be pure data writes. In that case, we still need to do a full data sync
1483 * completion.
Dave Chinner4f8ff442018-05-02 12:54:52 -07001484 */
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001485ssize_t
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -08001486iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1487 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001488{
1489 struct address_space *mapping = iocb->ki_filp->f_mapping;
1490 struct inode *inode = file_inode(iocb->ki_filp);
1491 size_t count = iov_iter_count(iter);
Eryu Guanc771c142017-03-02 15:02:06 -08001492 loff_t pos = iocb->ki_pos, start = pos;
1493 loff_t end = iocb->ki_pos + count - 1, ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001494 unsigned int flags = IOMAP_DIRECT;
1495 struct blk_plug plug;
1496 struct iomap_dio *dio;
1497
1498 lockdep_assert_held(&inode->i_rwsem);
1499
1500 if (!count)
1501 return 0;
1502
1503 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1504 if (!dio)
1505 return -ENOMEM;
1506
1507 dio->iocb = iocb;
1508 atomic_set(&dio->ref, 1);
1509 dio->size = 0;
1510 dio->i_size = i_size_read(inode);
1511 dio->end_io = end_io;
1512 dio->error = 0;
1513 dio->flags = 0;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001514 dio->wait_for_completion = is_sync_kiocb(iocb);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001515
1516 dio->submit.iter = iter;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001517 dio->submit.waiter = current;
1518 dio->submit.cookie = BLK_QC_T_NONE;
1519 dio->submit.last_queue = NULL;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001520
1521 if (iov_iter_rw(iter) == READ) {
1522 if (pos >= dio->i_size)
1523 goto out_free_dio;
1524
1525 if (iter->type == ITER_IOVEC)
1526 dio->flags |= IOMAP_DIO_DIRTY;
1527 } else {
Dave Chinner3460cac2018-05-02 12:54:53 -07001528 flags |= IOMAP_WRITE;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001529 dio->flags |= IOMAP_DIO_WRITE;
Dave Chinner3460cac2018-05-02 12:54:53 -07001530
1531 /* for data sync or sync, we need sync completion processing */
Dave Chinner4f8ff442018-05-02 12:54:52 -07001532 if (iocb->ki_flags & IOCB_DSYNC)
1533 dio->flags |= IOMAP_DIO_NEED_SYNC;
Dave Chinner3460cac2018-05-02 12:54:53 -07001534
1535 /*
1536 * For datasync only writes, we optimistically try using FUA for
1537 * this IO. Any non-FUA write that occurs will clear this flag,
1538 * hence we know before completion whether a cache flush is
1539 * necessary.
1540 */
1541 if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
1542 dio->flags |= IOMAP_DIO_WRITE_FUA;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001543 }
1544
Goldwyn Rodriguesa38d1242017-06-20 07:05:45 -05001545 if (iocb->ki_flags & IOCB_NOWAIT) {
1546 if (filemap_range_has_page(mapping, start, end)) {
1547 ret = -EAGAIN;
1548 goto out_free_dio;
1549 }
1550 flags |= IOMAP_NOWAIT;
1551 }
1552
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001553 ret = filemap_write_and_wait_range(mapping, start, end);
1554 if (ret)
1555 goto out_free_dio;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001556
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001557 /*
1558 * Try to invalidate cache pages for the range we're direct
1559 * writing. If this invalidation fails, tough, the write will
1560 * still work, but racing two incompatible write paths is a
1561 * pretty crazy thing to do, so we don't support it 100%.
1562 */
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001563 ret = invalidate_inode_pages2_range(mapping,
1564 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001565 if (ret)
1566 dio_warn_stale_pagecache(iocb->ki_filp);
Andrey Ryabinin55635ba2017-05-03 14:55:59 -07001567 ret = 0;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001568
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001569 if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
Chandan Rajendra546e7be2017-09-22 11:47:33 -07001570 !inode->i_sb->s_dio_done_wq) {
1571 ret = sb_init_dio_done_wq(inode->i_sb);
1572 if (ret < 0)
1573 goto out_free_dio;
1574 }
1575
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001576 inode_dio_begin(inode);
1577
1578 blk_start_plug(&plug);
1579 do {
1580 ret = iomap_apply(inode, pos, count, flags, ops, dio,
1581 iomap_dio_actor);
1582 if (ret <= 0) {
1583 /* magic error code to fall back to buffered I/O */
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001584 if (ret == -ENOTBLK) {
1585 dio->wait_for_completion = true;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001586 ret = 0;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001587 }
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001588 break;
1589 }
1590 pos += ret;
Chandan Rajendraa008c312017-04-12 11:03:20 -07001591
1592 if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
1593 break;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001594 } while ((count = iov_iter_count(iter)) > 0);
1595 blk_finish_plug(&plug);
1596
1597 if (ret < 0)
1598 iomap_dio_set_error(dio, ret);
1599
Dave Chinner3460cac2018-05-02 12:54:53 -07001600 /*
1601 * If all the writes we issued were FUA, we don't need to flush the
1602 * cache on IO completion. Clear the sync flag for this case.
1603 */
1604 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1605 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1606
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!dio->wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	return ret;

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
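
/*
 * Example (a sketch only, for a hypothetical "foo" filesystem): callers
 * typically invoke iomap_dio_rw() from ->read_iter/->write_iter with the
 * inode locked, passing their own iomap_ops and an optional end_io callback:
 *
 *	static ssize_t foo_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &foo_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */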

/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};

/*
 * Collect physical extents for this swap file.  Physical extents reported to
 * the swap code must be trimmed to align to a page boundary.  The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;
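
	/*
	 * Worked example (assuming 4 KiB pages): an extent at disk address
	 * 0x1200 with length 0x5000 covers bytes 0x1200..0x61ff.  The start
	 * rounds up to page 2 (byte 0x2000) and the end rounds down to page
	 * 6 (byte 0x6000), so pages 2..5 are usable and nr_pages is 4.
	 */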

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count.  The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}

/*
 * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	switch (iomap->type) {
	case IOMAP_MAPPED:
	case IOMAP_UNWRITTEN:
		/* Only real or unwritten extents. */
		break;
	case IOMAP_INLINE:
		/* No inline data. */
		pr_err("swapon: file is inline\n");
		return -EINVAL;
	default:
		pr_err("swapon: file has unallocated extents\n");
		return -EINVAL;
	}

	/* No uncommitted metadata or shared blocks. */
	if (iomap->flags & IOMAP_F_DIRTY) {
		pr_err("swapon: file is not committed\n");
		return -EINVAL;
	}
	if (iomap->flags & IOMAP_F_SHARED) {
		pr_err("swapon: file has shared extents\n");
		return -EINVAL;
	}

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev) {
		pr_err("swapon: file is on multiple devices\n");
		return -EINVAL;
	}

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
	return count;
}

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	/*
	 * Persist all file mapping metadata so that we won't have any
	 * IOMAP_F_DIRTY iomaps.
	 */
	ret = vfs_fsync(swap_file, 1);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
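
/*
 * Example (a sketch only, for a hypothetical "foo" filesystem): this helper
 * is meant to implement the ->swap_activate address_space operation:
 *
 *	static int foo_swap_activate(struct swap_info_struct *sis,
 *			struct file *swap_file, sector_t *span)
 *	{
 *		return iomap_swapfile_activate(sis, swap_file, span,
 *				&foo_iomap_ops);
 *	}
 */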
#endif /* CONFIG_SWAP */

static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	sector_t *bno = data, addr;

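	/*
	 * Legacy FIBMAP users pass the result around in a plain int, so
	 * warn instead of silently truncating block numbers above INT_MAX.
	 */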
	if (iomap->type == IOMAP_MAPPED) {
		addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
		if (addr > INT_MAX)
			WARN(1, "would truncate bmap result\n");
		else
			*bno = addr;
	}
	return 0;
}

/* legacy ->bmap interface.  0 is the error return (!) */
sector_t
iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops)
{
	struct inode *inode = mapping->host;
	loff_t pos = bno << inode->i_blkbits;	/* block number to byte offset */
	unsigned blocksize = i_blocksize(inode);

	if (filemap_write_and_wait(mapping))
		return 0;

	bno = 0;
	iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
	return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);
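
/*
 * Example (a sketch only, for a hypothetical "foo" filesystem): wiring the
 * helper up as the ->bmap address_space operation:
 *
 *	static sector_t foo_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return iomap_bmap(mapping, block, &foo_iomap_ops);
 *	}
 */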