/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence the call stack from this point must follow GFP_NOFS allocation
 * semantics for all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);

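/*
 * Usage sketch (illustrative only, not part of this file): a filesystem's
 * get_block() callback that has just allocated a block for a DAX inode can
 * zero the new block with dax_clear_blocks() before returning it, so stale
 * media contents are never exposed to userspace.  The "new_block" variable
 * and the surrounding allocation code are hypothetical.
 *
 *	if (create && IS_DAX(inode)) {
 *		err = dax_clear_blocks(inode, new_block,
 *				       1 << inode->i_blkbits);
 *		if (err)
 *			return err;
 *	}
 *	map_bh(bh_result, inode->i_sb, new_block);
 */
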
static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
									end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len)
			break;

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);

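/*
 * Usage sketch (illustrative only, not part of this file): a filesystem
 * typically calls dax_do_io() from its ->direct_IO address_space method
 * when the inode has the DAX flag set, falling back to the regular
 * blockdev path otherwise.  "myfs_get_block" is a hypothetical name for
 * the filesystem's own get_block callback.
 *
 *	static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 *				      loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter, offset,
 *					 myfs_get_block, NULL, DIO_LOCKING);
 *		return blockdev_direct_IO(iocb, inode, iter, offset,
 *					  myfs_get_block);
 *	}
 */
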
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.  __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock;
		}
	} else {
		i_mmap_lock_write(mapping);
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock;
		} else {
			i_mmap_unlock_write(mapping);
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock;
		vmf->page = page;
		if (!page) {
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				error = -EIO;
				goto unlock;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent.  If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released.  We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

	if (!page)
		i_mmap_unlock_write(mapping);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	} else {
		i_mmap_unlock_write(mapping);
	}

	goto out;
}
EXPORT_SYMBOL(__dax_fault);

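/*
 * Usage sketch (illustrative only, not part of this file): a filesystem
 * that serialises faults against truncate with its own lock can call
 * __dax_fault() directly and do the pagefault accounting itself, in the
 * same spirit as dax_fault() below.  The "MYFS_I()->mmap_lock" and
 * "myfs_get_block" names are hypothetical.
 *
 *	static int myfs_dax_fault(struct vm_area_struct *vma,
 *				  struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *		int ret;
 *
 *		if (vmf->flags & FAULT_FLAG_WRITE) {
 *			sb_start_pagefault(inode->i_sb);
 *			file_update_time(vma->vm_file);
 *		}
 *		down_read(&MYFS_I(inode)->mmap_lock);
 *		ret = __dax_fault(vma, vmf, myfs_get_block, NULL);
 *		up_read(&MYFS_I(inode)->mmap_lock);
 *		if (vmf->flags & FAULT_FLAG_WRITE)
 *			sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */
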
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed; see __dax_fault()
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);

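/*
 * Usage sketch (illustrative only, not part of this file): a filesystem
 * that does not need extra locking can point its vm_operations at small
 * wrappers around dax_fault() and at dax_pfn_mkwrite() (defined later in
 * this file), and switch a DAX mapping over to them in ->mmap.  The "myfs"
 * names are hypothetical.
 *
 *	static int myfs_dax_fault(struct vm_area_struct *vma,
 *				  struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, myfs_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct myfs_dax_vm_ops = {
 *		.fault		= myfs_dax_fault,
 *		.page_mkwrite	= myfs_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 *	static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		if (!IS_DAX(file_inode(file)))
 *			return generic_file_mmap(file, vma);
 *		file_accessed(file);
 *		vma->vm_ops = &myfs_dax_vm_ops;
 *		vma->vm_flags |= VM_MIXEDMAP;
 *		return 0;
 *	}
 */
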
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

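/*
 * For example, with 4KB pages and 2MB PMDs (the common x86-64
 * configuration), PMD_SIZE >> PAGE_SHIFT is 512, so PG_PMD_COLOUR is 511
 * (0x1ff): the pfn backing a PMD mapping must have its low nine bits clear,
 * and (pgoff | PG_PMD_COLOUR) is the last page index the PMD would cover.
 */
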
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	i_mmap_lock_write(mapping);
	length = get_block(inode, block, &bh, write);
	if (length) {
		/* don't leak i_mmap_lock_write() on the error path */
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
		int i;

		/* Map the block first so kaddr is valid before we zero it */
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0 || length < PMD_SIZE)
			goto fallback;
		for (i = 0; i < PTRS_PER_PMD; i++)
			clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
		wmb_pmem();
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		result |= VM_FAULT_MAJOR;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_write(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_write(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	i_mmap_unlock_write(mapping);

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD entry to install the huge page into
 * @flags: Fault flags (FAULT_FLAG_WRITE, etc.)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written; see __dax_fault()
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

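/*
 * Usage sketch (illustrative only, not part of this file): with
 * CONFIG_TRANSPARENT_HUGEPAGE, a filesystem can provide a pmd_fault handler
 * next to its PTE fault handler; the mmap method typically also sets
 * VM_HUGEPAGE on DAX VMAs so that huge faults are attempted at all.  The
 * "myfs" names are hypothetical.
 *
 *	static int myfs_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags, myfs_get_block,
 *				     NULL);
 *	}
 *
 * which is then hooked up as the .pmd_fault member of the
 * vm_operations_struct shown in the earlier sketch.
 */
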
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 *
 * Filesystems may use this helper as their pfn_mkwrite handler for DAX
 * files; it updates the file modification time under a pagefault freeze.
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range is being zeroed
 * @from: The file offset at which zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

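/*
 * Usage sketch (illustrative only, not part of this file): a hole-punch
 * implementation can use dax_zero_page_range() to zero the partial page at
 * the start of the punched range before deallocating the whole blocks in
 * the middle.  The "myfs_get_block" name is hypothetical, and offset/length
 * are assumed to already be clamped to i_size.
 *
 *	unsigned partial = offset & (PAGE_CACHE_SIZE - 1);
 *
 *	if (partial) {
 *		unsigned len = min_t(loff_t, length,
 *				     PAGE_CACHE_SIZE - partial);
 *		err = dax_zero_page_range(inode, offset, len, myfs_get_block);
 *	}
 */
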
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
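
/*
 * Usage sketch (illustrative only, not part of this file): a filesystem's
 * truncate path calls dax_truncate_page() on DAX inodes where it would call
 * block_truncate_page() otherwise, so the tail of the final partial page is
 * zeroed on media before the size change.  The "myfs" names are hypothetical.
 *
 *	static int myfs_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		int error;
 *
 *		if (IS_DAX(inode))
 *			error = dax_truncate_page(inode, newsize,
 *						  myfs_get_block);
 *		else
 *			error = block_truncate_page(inode->i_mapping, newsize,
 *						    myfs_get_block);
 *		if (error)
 *			return error;
 *		truncate_setsize(inode, newsize);
 *		return 0;
 *	}
 */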