/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/sizes.h>

/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence the stack from this point must follow GFP_NOFS semantics for
 * all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count, sz;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(addr, sz);
		size -= sz;
		BUG_ON(sz & 511);
		sector += sz / 512;
		cond_resched();
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
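
/*
 * Example (not part of the original file): a minimal sketch of a caller
 * zeroing a freshly allocated extent before exposing it to readers.  The
 * first_block/nr_blocks parameters are assumptions standing in for values
 * from the filesystem's own allocation path.
 */
static int __maybe_unused example_zero_new_extent(struct inode *inode,
		sector_t first_block, unsigned long nr_blocks)
{
	/* dax_clear_blocks() takes a byte count, not a block count */
	return dax_clear_blocks(inode, first_block,
				(long)nr_blocks << inode->i_blkbits);
}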

static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size. To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		loff_t start, loff_t end, get_block_t get_block,
		struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
							end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			retval = -EFAULT;
			break;
		}

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes. For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		struct iov_iter *iter, loff_t pos, get_block_t get_block,
		dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
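
/*
 * Example (not part of the original file): a sketch of how a filesystem's
 * ->direct_IO method might dispatch to dax_do_io().  example_get_block()
 * is a stand-in for the filesystem's real get_block_t, assumed to be
 * provided elsewhere; passing a NULL end_io callback with DIO_LOCKING
 * mirrors the simplest possible caller.
 */
int example_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create);

static ssize_t __maybe_unused example_direct_IO(struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* DAX inodes bypass the page cache entirely */
	if (IS_DAX(inode))
		return dax_do_io(iocb, inode, iter, offset,
				 example_get_block, NULL, DIO_LOCKING);

	/* non-DAX inodes take the conventional direct I/O path */
	return blockdev_direct_IO(iocb, inode, iter, offset,
				  example_get_block);
}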

/*
 * The user has performed a load from a hole in the file. Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files. We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file. We can't tell the filesystem to free it because we can't
	 * take i_mutex here. In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed. This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks. If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed; see __dax_fault()
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
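
/*
 * Example (not part of the original file): one way a filesystem might wrap
 * dax_fault() for its ->fault method.  example_end_io_unwritten() is a
 * stand-in for the filesystem's unwritten-extent conversion callback,
 * assumed to be provided elsewhere (or NULL if the fs has no unwritten
 * extents); example_get_block() is the same stand-in used above.
 */
void example_end_io_unwritten(struct buffer_head *bh, int uptodate);

static int __maybe_unused example_dax_fault(struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, example_get_block,
			 example_end_io_unwritten);
}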

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* dax pmd mappings are broken wrt gup and fork */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	length = get_block(inode, block, &bh, write);
	if (length)
		return VM_FAULT_SIGBUS;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs. Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF. We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		/*
		 * TODO: teach vmf_insert_pfn_pmd() to support
		 * 'pte_special' for pmds
		 */
		if (pfn_valid(pfn))
			goto fallback;

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(kaddr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The address whose fault is being handled
 * @pmd: Pointer to the PMD entry in the page table
 * @flags: The fault flags (FAULT_FLAG_WRITE and friends)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written; see __dax_fault()
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
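
/*
 * Example (not part of the original file): dax_pfn_mkwrite() matches the
 * ->pfn_mkwrite method signature, so a filesystem can point at it
 * directly when filling in its vm_operations_struct, alongside a ->fault
 * wrapper like the example_dax_fault() sketch after dax_fault() above.
 */
static const struct vm_operations_struct example_dax_vm_ops __maybe_unused = {
	.fault		= example_dax_fault,	/* wraps dax_fault() */
	.pfn_mkwrite	= dax_pfn_mkwrite,	/* used as-is */
};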

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range resides
 * @from: The file offset at which zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file. This is intended for hole-punch operations. If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks. Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks. Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
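
/*
 * Example (not part of the original file): a sketch of a truncate path
 * choosing between dax_truncate_page() and block_truncate_page() based
 * on whether the inode is DAX.  example_get_block() is the same stand-in
 * used in the earlier sketches.
 */
static int __maybe_unused example_truncate_tail(struct inode *inode,
		loff_t newsize)
{
	if (IS_DAX(inode))
		return dax_truncate_page(inode, newsize, example_get_block);
	return block_truncate_page(inode->i_mapping, newsize,
				   example_get_block);
}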