/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
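
/*
 * Usage sketch (illustrative only; "blocknr" stands for whatever block
 * number the filesystem's allocator just returned): a filesystem can call
 * dax_clear_blocks() on freshly allocated blocks so that reads through a
 * DAX mapping see zeroes rather than stale media contents:
 *
 *	err = dax_clear_blocks(inode, blocknr, 1 << inode->i_blkbits);
 *	if (err < 0)
 *		return err;
 */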

static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first;	/* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
									end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			retval = -EFAULT;
			break;
		}

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
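
/*
 * Usage sketch (illustrative only; example_get_block is a hypothetical
 * get_block_t): a filesystem typically calls dax_do_io() from its
 * ->direct_IO address_space operation when IS_DAX(inode):
 *
 *	static ssize_t example_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, offset,
 *				example_get_block, NULL, DIO_LOCKING);
 *	}
 */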

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, then it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent.  If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released.  We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written (may be NULL if the filesystem never returns unwritten
 *	extents from @get_block; see __dax_fault() for details)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
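
/*
 * Usage sketch (illustrative only; example_get_block is a hypothetical
 * get_block_t): dax_fault() is normally wired into the file's
 * vm_operations_struct, alongside dax_pfn_mkwrite() defined below:
 *
 *	static int example_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, example_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault		= example_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */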

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	length = get_block(inode, block, &bh, write);
	if (length)
		return VM_FAULT_SIGBUS;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			int i;
			for (i = 0; i < PTRS_PER_PMD; i++)
				clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD to install a huge zero page or PMD-sized mapping into
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written (see __dax_fault() for details)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
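
/*
 * Usage sketch (illustrative only; example_get_block and
 * example_complete_unwritten are hypothetical callbacks): this is the
 * .pmd_fault counterpart to the dax_fault() wiring shown earlier.  Note
 * that __dax_pmd_fault() calls complete_unwritten unconditionally for
 * unwritten buffers, so only pass NULL if @get_block never returns them:
 *
 *	static int example_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags,
 *				example_get_block,
 *				example_complete_unwritten);
 *	}
 */
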
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range is being zeroed
 * @from: The file offset where zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
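
/*
 * Usage sketch (illustrative only; "offset" and example_get_block are
 * hypothetical): when punching a hole that begins partway into a page,
 * a filesystem can zero the remainder of that page like this:
 *
 *	unsigned partial = PAGE_CACHE_ALIGN(offset) - offset;
 *
 *	if (partial)
 *		err = dax_zero_page_range(inode, offset, partial,
 *				example_get_block);
 */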

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
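
/*
 * Usage sketch (illustrative only; example_get_block is a hypothetical
 * get_block_t): a filesystem's truncate path would zero the new partial
 * tail page before updating i_size, e.g.:
 *
 *	error = dax_truncate_page(inode, newsize, example_get_block);
 *	if (!error)
 *		truncate_setsize(inode, newsize);
 */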