/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

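/*
 * dax_map_atomic() translates dax->sector into a kernel-virtual pmem
 * address via bdev_direct_access(), taking a reference on the request
 * queue (blk_queue_enter()) so the device cannot go away while the
 * mapping is in use.  It returns the number of contiguous bytes
 * available at dax->addr, or a negative errno; on failure dax->addr is
 * left as an ERR_PTR.  Every successful call must be paired with
 * dax_unmap_atomic(), which drops the queue reference.
 */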
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

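/*
 * Read one page worth of data from the page-aligned sector run
 * containing @n, bypassing the page cache.  Callers outside this file
 * (for example the partition-table reading code, as of this kernel)
 * use it so that DAX block devices need not populate the page cache.
 */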
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence the stack from this point must follow GFP_NOFS semantics for
 * all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct blk_dax_ctl dax = {
		.sector = block << (inode->i_blkbits - 9),
		.size = _size,
	};

	might_sleep();
	do {
		long count, sz;

		count = dax_map_atomic(bdev, &dax);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(dax.addr, sz);
		dax.size -= sz;
		dax.sector += sz / 512;
		dax_unmap_atomic(bdev, &dax);
		cond_resched();
	} while (dax.size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
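/*
 * Example (illustrative sketch, not part of this file): a filesystem
 * typically calls dax_clear_blocks() right after allocating blocks that
 * may contain stale data, so that a racing mmap load cannot observe old
 * contents.  Assuming a hypothetical allocator foo_alloc_block():
 *
 *	err = foo_alloc_block(inode, iblock, &blocknr);
 *	if (!err && IS_DAX(inode))
 *		err = dax_clear_blocks(inode, blocknr,
 *				1 << inode->i_blkbits);
 */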

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}

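/*
 * The core read/write loop: walk [start, end), calling get_block() to map
 * each extent, then copy directly between the iterator and the mapped
 * pmem.  Reads from holes are satisfied with zeroes; newly allocated or
 * unwritten extents are zeroed around the copied range so no stale data
 * is ever exposed to userspace.
 */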
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		loff_t start, loff_t end, get_block_t get_block,
		struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(dax.addr, map_len, first,
							pos, end);
					need_wmb = true;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: DIO_LOCKING and/or DIO_SKIP_DIO_COUNT (see below)
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		struct iov_iter *iter, loff_t pos, get_block_t get_block,
		dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		inode_lock(inode);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			inode_unlock(inode);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
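/*
 * Example (illustrative sketch, not part of this file): a filesystem
 * wires this up from its ->direct_IO method, roughly the way ext2 does,
 * with foo_get_block standing in for the fs block-mapping callback:
 *
 *	static ssize_t foo_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter, offset,
 *					foo_get_block, NULL, DIO_LOCKING);
 *		return blockdev_direct_IO(iocb, inode, iter, offset,
 *					foo_get_block);
 *	}
 */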

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
		struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

#define NO_SECTOR -1
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT))

static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
		sector_t sector, bool pmd_entry, bool dirty)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	pgoff_t pmd_index = DAX_PMD_INDEX(index);
	int type, error = 0;
	void *entry;

	WARN_ON_ONCE(pmd_entry && !dirty);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	spin_lock_irq(&mapping->tree_lock);

	entry = radix_tree_lookup(page_tree, pmd_index);
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
		index = pmd_index;
		goto dirty;
	}

	entry = radix_tree_lookup(page_tree, index);
	if (entry) {
		type = RADIX_DAX_TYPE(entry);
		if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
					type != RADIX_DAX_PMD)) {
			error = -EIO;
			goto unlock;
		}

		if (!pmd_entry || type == RADIX_DAX_PMD)
			goto dirty;

		/*
		 * We only insert dirty PMD entries into the radix tree.  This
		 * means we don't need to worry about removing a dirty PTE
		 * entry and inserting a clean PMD entry, thus reducing the
		 * range we would flush with a follow-up fsync/msync call.
		 */
		radix_tree_delete(&mapping->page_tree, index);
		mapping->nrexceptional--;
	}

	if (sector == NO_SECTOR) {
		/*
		 * This can happen during correct operation if our pfn_mkwrite
		 * fault raced against a hole punch operation.  If this
		 * happens the pte that was hole punched will have been
		 * unmapped and the radix tree entry will have been removed by
		 * the time we are called, but the call will still happen.  We
		 * will return all the way up to wp_pfn_shared(), where the
		 * pte_same() check will fail, eventually causing page fault
		 * to be retried by the CPU.
		 */
		goto unlock;
	}

	error = radix_tree_insert(page_tree, index,
			RADIX_DAX_ENTRY(sector, pmd_entry));
	if (error)
		goto unlock;

	mapping->nrexceptional++;
 dirty:
	if (dirty)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return error;
}

static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end].  This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
		loff_t end)
{
	struct inode *inode = mapping->host;
	struct block_device *bdev = inode->i_sb->s_bdev;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	start_index = start >> PAGE_CACHE_SHIFT;
	end_index = end >> PAGE_CACHE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
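/*
 * Example (illustrative sketch, not part of this file): the fsync/msync
 * path reaches this function from the generic write-and-wait code when
 * the mapping is a DAX mapping, conceptually:
 *
 *	if (dax_mapping(mapping) && mapping->nrexceptional)
 *		err = dax_writeback_mapping_range(mapping, lstart, lend);
 *
 * The exact call site lives in the generic writeback code, not here.
 */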

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct address_space *mapping = inode->i_mapping;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(dax.addr, PAGE_SIZE);
		wmb_pmem();
	}
	dax_unmap_atomic(bdev, &dax);

	error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
			vmf->flags & FAULT_FLAG_WRITE);
	if (error)
		goto out;

	error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.  __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
		get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent.  If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released.  We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written, as described at __dax_fault(); may be NULL if the fs does
 *	not use unwritten extents
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
		get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
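/*
 * Example (illustrative sketch, not part of this file): a filesystem's
 * ->fault handler is usually a thin wrapper, roughly:
 *
 *	static int foo_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, foo_get_block, NULL);
 *	}
 *
 * where foo_get_block is the filesystem's get_block_t and NULL is passed
 * because this hypothetical fs does not use unwritten extents.
 */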

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int error, result = 0;
	bool alloc = false;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;

	if (get_block(inode, block, &bh, 0) != 0)
		return VM_FAULT_SIGBUS;

	if (!buffer_mapped(&bh) && write) {
		if (get_block(inode, block, &bh, 1) != 0)
			return VM_FAULT_SIGBUS;
		alloc = true;
	}

	bdev = bh.b_bdev;

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		return VM_FAULT_FALLBACK;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (alloc) {
		loff_t lstart = pgoff << PAGE_SHIFT;
		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

		truncate_pagecache_range(inode, lstart, lend);
	}

	i_mmap_lock_read(mapping);

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(&bh, address,
				"offset + huge page size > file size");
		goto fallback;
	}

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(dax.addr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}
		dax_unmap_atomic(bdev, &dax);

		/*
		 * For PTE faults we insert a radix tree entry for reads, and
		 * leave it clean.  Then on the first write we dirty the radix
		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
		 * call into get_block() to translate the pgoff to a sector in
		 * order to be able to create a new radix tree entry.
		 *
		 * The PMD path doesn't have an equivalent to
		 * dax_pfn_mkwrite(), though, so for a read followed by a
		 * write we traverse all the way through __dax_pmd_fault()
		 * twice.  This means we can just skip inserting a radix tree
		 * entry completely on the initial read and just wait until
		 * the write to insert a dirty entry.
		 */
		if (write) {
			error = dax_radix_entry(mapping, pgoff, dax.sector,
					true, true);
			if (error) {
				dax_pmd_dbg(&bh, address,
						"PMD radix insertion failed");
				goto fallback;
			}
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The virtual address that faulted
 * @pmd: Pointer to the PMD entry being faulted in
 * @flags: The fault flags (e.g. FAULT_FLAG_WRITE)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written, as described at __dax_fault()
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;

	/*
	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
	 * RADIX_DAX_PTE entry already exists in the radix tree from a
	 * previous call to __dax_fault().  We just want to look up that PTE
	 * entry using vmf->pgoff and make sure the dirty tag is set.  This
	 * saves us from having to make a call to get_block() here to look
	 * up the sector.
	 */
	dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false, true);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
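/*
 * Example (illustrative sketch, not part of this file): tying the fault
 * helpers together, a DAX-aware filesystem typically installs them all
 * in its vm_operations_struct, roughly:
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_dax_fault,
 *		.pmd_fault	= foo_dax_pmd_fault,
 *		.page_mkwrite	= foo_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 * where the foo_* wrappers call dax_fault()/dax_pmd_fault() with the
 * filesystem's get_block_t, and dax_pfn_mkwrite() is used directly
 * since it needs no filesystem callback.
 */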

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range is being zeroed
 * @from: The file offset at which zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
		get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		struct block_device *bdev = bh.b_bdev;
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PAGE_CACHE_SIZE,
		};

		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
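/*
 * Example (illustrative sketch, not part of this file): a hole-punch
 * implementation might zero the partial pages at each end of the punched
 * range before removing the underlying blocks, roughly:
 *
 *	err = dax_zero_page_range(inode, offset, partial_len,
 *			foo_get_block);
 *
 * with foo_get_block again standing in for the filesystem's mapping
 * callback.
 */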

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
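/*
 * Example (illustrative sketch, not part of this file): a setattr path
 * shrinking a DAX file would typically zero the new partial tail page
 * before updating i_size, roughly:
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize, foo_get_block);
 *
 * mirroring how block_truncate_page() is used on non-DAX files.
 */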