/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#define RADIX_DAX_MASK	0xf
#define RADIX_DAX_SHIFT	4
#define RADIX_DAX_PTE  (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_PMD  (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

/*
 * dax_clear_sectors() is called from within transaction context from XFS,
 * and hence this means the stack from this point must follow GFP_NOFS
 * semantics for all operations.
 */
int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
{
	struct blk_dax_ctl dax = {
		.sector = _sector,
		.size = _size,
	};

	might_sleep();
	do {
		long count, sz;

		count = dax_map_atomic(bdev, &dax);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(dax.addr, sz);
		dax.size -= sz;
		dax.sector += sz / 512;
		dax_unmap_atomic(bdev, &dax);
		cond_resched();
	} while (dax.size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_sectors);
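
/*
 * Illustrative call site (a sketch, not part of this file): a filesystem
 * that has just allocated blocks for a DAX file can zero them before
 * exposing them, roughly as
 *
 *	err = dax_clear_sectors(inode->i_sb->s_bdev,
 *				block << (blkbits - 9),
 *				nr_blocks << blkbits);
 *
 * where "block", "blkbits" and "nr_blocks" stand in for the caller's own
 * block accounting; the sector is in 512-byte units and the size in bytes.
 */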

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size. To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};
	unsigned blkbits = inode->i_blkbits;
	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
								>> blkbits;

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
				/*
				 * We allow uninitialized buffers for writes
				 * beyond EOF as those cannot race with faults
				 */
				WARN_ON_ONCE(
					(buffer_new(bh) && block < file_blks) ||
					(rw == WRITE && buffer_unwritten(bh)));
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes. For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_lock(inode);

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if (end_io) {
		int err;

		err = end_io(iocb, pos, retval, bh.b_private);
		if (err)
			retval = err;
	}

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
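
/*
 * Illustrative call site (a sketch, not part of this file): a filesystem's
 * ->direct_IO method can forward DAX inodes here, e.g.
 *
 *	static ssize_t foo_dax_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, offset, foo_get_block,
 *					NULL, DIO_LOCKING);
 *	}
 *
 * "foo_dax_direct_IO" and "foo_get_block" are hypothetical names standing
 * in for the filesystem's own helpers.
 */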

/*
 * The user has performed a load from a hole in the file. Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files. We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

#define NO_SECTOR -1
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))

static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
		sector_t sector, bool pmd_entry, bool dirty)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	pgoff_t pmd_index = DAX_PMD_INDEX(index);
	int type, error = 0;
	void *entry;

	WARN_ON_ONCE(pmd_entry && !dirty);
	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	spin_lock_irq(&mapping->tree_lock);

	entry = radix_tree_lookup(page_tree, pmd_index);
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
		index = pmd_index;
		goto dirty;
	}

	entry = radix_tree_lookup(page_tree, index);
	if (entry) {
		type = RADIX_DAX_TYPE(entry);
		if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
					type != RADIX_DAX_PMD)) {
			error = -EIO;
			goto unlock;
		}

		if (!pmd_entry || type == RADIX_DAX_PMD)
			goto dirty;

		/*
		 * We only insert dirty PMD entries into the radix tree. This
		 * means we don't need to worry about removing a dirty PTE
		 * entry and inserting a clean PMD entry, thus reducing the
		 * range we would flush with a follow-up fsync/msync call.
		 */
		radix_tree_delete(&mapping->page_tree, index);
		mapping->nrexceptional--;
	}

	if (sector == NO_SECTOR) {
		/*
		 * This can happen during correct operation if our pfn_mkwrite
		 * fault raced against a hole punch operation. If this
		 * happens the pte that was hole punched will have been
		 * unmapped and the radix tree entry will have been removed by
		 * the time we are called, but the call will still happen. We
		 * will return all the way up to wp_pfn_shared(), where the
		 * pte_same() check will fail, eventually causing page fault
		 * to be retried by the CPU.
		 */
		goto unlock;
	}

	error = radix_tree_insert(page_tree, index,
			RADIX_DAX_ENTRY(sector, pmd_entry));
	if (error)
		goto unlock;

	mapping->nrexceptional++;
 dirty:
	if (dirty)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return error;
}

static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked. These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
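
/*
 * Illustrative caller (a sketch, not part of this file): a filesystem's
 * ->writepages method can route DAX inodes here instead of submitting bios,
 * e.g.
 *
 *	static int foo_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		if (dax_mapping(mapping))
 *			return dax_writeback_mapping_range(mapping,
 *					mapping->host->i_sb->s_bdev, wbc);
 *		...
 *	}
 *
 * "foo_writepages" is a hypothetical name; dax_mapping() is the usual test
 * for a DAX address_space.
 */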

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct address_space *mapping = inode->i_mapping;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	int error;

	i_mmap_lock_read(mapping);

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}
	dax_unmap_atomic(bdev, &dax);

	error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
			vmf->flags & FAULT_FLAG_WRITE);
	if (error)
		goto out;

	error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			put_page(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page)
			i_mmap_lock_read(mapping);
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		put_page(page);
		page = NULL;
	}

	/* Filesystem should not return unwritten buffers to us! */
	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	error = dax_insert_mapping(inode, &bh, vma, vmf);

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
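
/*
 * Illustrative wiring (a sketch, not part of this file): a filesystem
 * typically calls dax_fault() from its vm_operations_struct ->fault handler,
 * passing its own get_block_t, e.g.
 *
 *	static int foo_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, foo_get_block);
 *	}
 *
 * "foo_dax_fault" and "foo_get_block" are hypothetical placeholders for the
 * filesystem's own helpers.
 */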

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int error, result = 0;
	bool alloc = false;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;

	if (get_block(inode, block, &bh, 0) != 0)
		return VM_FAULT_SIGBUS;

	if (!buffer_mapped(&bh) && write) {
		if (get_block(inode, block, &bh, 1) != 0)
			return VM_FAULT_SIGBUS;
		alloc = true;
		WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	}

	bdev = bh.b_bdev;

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs. Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		return VM_FAULT_FALLBACK;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (alloc) {
		loff_t lstart = pgoff << PAGE_SHIFT;
		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

		truncate_pagecache_range(inode, lstart, lend);
	}

	i_mmap_lock_read(mapping);

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			dax_pmd_dbg(&bh, address, "dax-error fallback");
			goto fallback;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}
		dax_unmap_atomic(bdev, &dax);

		/*
		 * For PTE faults we insert a radix tree entry for reads, and
		 * leave it clean. Then on the first write we dirty the radix
		 * tree entry via the dax_pfn_mkwrite() path. This sequence
		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
		 * call into get_block() to translate the pgoff to a sector in
		 * order to be able to create a new radix tree entry.
		 *
		 * The PMD path doesn't have an equivalent to
		 * dax_pfn_mkwrite(), though, so for a read followed by a
		 * write we traverse all the way through __dax_pmd_fault()
		 * twice. This means we can just skip inserting a radix tree
		 * entry completely on the initial read and just wait until
		 * the write to insert a dirty entry.
		 */
		if (write) {
			error = dax_radix_entry(mapping, pgoff, dax.sector,
					true, true);
			if (error) {
				dax_pmd_dbg(&bh, address,
						"PMD radix insertion failed");
				goto fallback;
			}
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The virtual address that faulted
 * @pmd: The PMD entry to be installed
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	int error;

	/*
	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
	 * RADIX_DAX_PTE entry already exists in the radix tree from a
	 * previous call to __dax_fault().  We just want to look up that PTE
	 * entry using vmf->pgoff and make sure the dirty tag is set.  This
	 * saves us from having to make a call to get_block() here to look
	 * up the sector.
	 */
	error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
			true);

	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	if (error)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
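
/*
 * Illustrative wiring (a sketch, not part of this file): filesystems usually
 * point ->pfn_mkwrite (and often ->page_mkwrite) at this helper, e.g.
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 * "foo_dax_vm_ops" and "foo_dax_fault" are hypothetical names for the
 * filesystem's own definitions.
 */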

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		struct block_device *bdev = bh.b_bdev;
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PAGE_SIZE,
		};

		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
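
/*
 * Illustrative use (a sketch, not part of this file): on truncate, a DAX
 * filesystem zeroes the partial tail page before updating i_size, roughly
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize, foo_get_block);
 *
 * with "foo_get_block" standing in for the filesystem's own get_block_t.
 */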