/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/sizes.h>

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence this means the stack from this point must follow GFP_NOFS
 * semantics for all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct blk_dax_ctl dax = {
		.sector = block << (inode->i_blkbits - 9),
		.size = _size,
	};

	might_sleep();
	do {
		long count, sz;

		count = dax_map_atomic(bdev, &dax);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(dax.addr, sz);
		dax.size -= sz;
		dax.sector += sz / 512;
		dax_unmap_atomic(bdev, &dax);
		cond_resched();
	} while (dax.size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
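
/*
 * Illustrative sketch (not part of this file): a filesystem that has just
 * allocated a new extent for a DAX file can zero it with this helper before
 * the extent becomes visible.  The example_* names below are hypothetical;
 * per the comment above, XFS is the in-tree caller and does this from
 * transaction context.
 *
 *	static int example_zero_new_extent(struct inode *inode,
 *			sector_t first_fsb, long bytes)
 *	{
 *		// 'first_fsb' is a filesystem block number; the length is
 *		// in bytes, as dax_clear_blocks() expects.
 *		return dax_clear_blocks(inode, first_fsb, bytes);
 *	}
 */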

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(dax.addr, map_len, first,
							pos, end);
					need_wmb = true;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
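
/*
 * Illustrative sketch (not part of this file): a typical ->direct_IO method
 * for a DAX inode simply forwards to dax_do_io() with the filesystem's own
 * get_block callback.  example_get_block is a hypothetical name; ext2 and
 * ext4 follow this pattern with DIO_LOCKING and no end_io callback.
 *
 *	static ssize_t example_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		// end_io is only needed if the fs must convert unwritten
 *		// extents once a write has completed.
 *		return dax_do_io(iocb, inode, iter, offset,
 *				example_get_block, NULL, DIO_LOCKING);
 *	}
 */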

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct address_space *mapping = inode->i_mapping;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(dax.addr, PAGE_SIZE);
		wmb_pmem();
	}
	dax_unmap_atomic(bdev, &dax);

	error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, then it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);
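
/*
 * Illustrative sketch (not part of this file): __dax_fault() is meant for
 * filesystems that wrap the fault in their own locking, in the style of XFS.
 * The example_* names are hypothetical.
 *
 *	static int example_filemap_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *		int ret;
 *
 *		example_mmap_lock(inode);	// fs-private mmap/invalidation lock
 *		ret = __dax_fault(vma, vmf, example_get_block,
 *				example_complete_unwritten);
 *		example_mmap_unlock(inode);
 *
 *		return ret;
 *	}
 */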

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed (may be NULL if the
 *	filesystem never returns unwritten extents from @get_block)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
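
/*
 * Illustrative sketch (not part of this file): a filesystem that needs no
 * locking beyond what dax_fault() already does can install it through a thin
 * wrapper in its vm_operations_struct.  The example_* names are hypothetical;
 * a filesystem without unwritten extents passes NULL for complete_unwritten,
 * as ext2 does.
 *
 *	static int example_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, example_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault		= example_dax_fault,
 *		.page_mkwrite	= example_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */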

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int result = 0;

	/* dax pmd mappings are broken wrt gup and fork */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	if (get_block(inode, block, &bh, write) != 0)
		return VM_FAULT_SIGBUS;
	bdev = bh.b_bdev;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (dax.pfn & PG_PMD_COLOUR)) {
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		/*
		 * TODO: teach vmf_insert_pfn_pmd() to support
		 * 'pte_special' for pmds
		 */
		if (pfn_valid(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(dax.addr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}
		dax_unmap_atomic(bdev, &dax);

		result |= vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting virtual address
 * @pmd: Pointer to the PMD entry to install
 * @flags: Fault flags (FAULT_FLAG_WRITE, etc.)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
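
/*
 * Illustrative sketch (not part of this file): the PMD path is wired up via
 * the ->pmd_fault method of vm_operations_struct alongside the PTE handler.
 * The example_* names are hypothetical.
 *
 *	static int example_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags,
 *				example_get_block, NULL);
 *	}
 *
 *	// and in the vm_operations_struct:
 *	//	.pmd_fault	= example_dax_pmd_fault,
 */
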
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
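
/*
 * Illustrative sketch (not part of this file): dax_pfn_mkwrite() already has
 * the ->pfn_mkwrite signature, so a filesystem that needs no extra locking
 * can install it directly; one that does its own locking wraps it the same
 * way as the fault handlers above.
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		...
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */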

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		struct block_device *bdev = bh.b_bdev;
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PAGE_CACHE_SIZE,
		};

		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
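
/*
 * Illustrative sketch (not part of this file): zeroing the partial page at
 * the start of a hole-punch range.  example_get_block is hypothetical, and
 * the full blocks in the middle of the range are assumed to be deallocated
 * separately by the filesystem.
 *
 *	unsigned partial = PAGE_CACHE_ALIGN(start) - start;
 *
 *	if (partial) {
 *		// offset + length stays within one page, as required
 *		err = dax_zero_page_range(inode, start, partial,
 *				example_get_block);
 *		if (err)
 *			return err;
 *	}
 */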

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
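
/*
 * Illustrative sketch (not part of this file): a truncate path zeroes the
 * tail of the new last page before shrinking i_size so that mmap readers of
 * that page do not see stale data.  example_get_block is hypothetical; ext2
 * uses this pattern in its setsize path.
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize, example_get_block);
 *	else
 *		error = block_truncate_page(inode->i_mapping, newsize,
 *				example_get_block);
 */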