/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
        struct request_queue *q = bdev->bd_queue;
        long rc = -EIO;

        dax->addr = (void __pmem *) ERR_PTR(-EIO);
        if (blk_queue_enter(q, true) != 0)
                return rc;

        rc = bdev_direct_access(bdev, dax);
        if (rc < 0) {
                dax->addr = (void __pmem *) ERR_PTR(rc);
                blk_queue_exit(q);
                return rc;
        }
        return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
                const struct blk_dax_ctl *dax)
{
        if (IS_ERR(dax->addr))
                return;
        blk_queue_exit(bdev->bd_queue);
}

/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence this means the stack from this point must follow GFP_NOFS
 * semantics for all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
{
        struct block_device *bdev = inode->i_sb->s_bdev;
        struct blk_dax_ctl dax = {
                .sector = block << (inode->i_blkbits - 9),
                .size = _size,
        };

        might_sleep();
        do {
                long count, sz;

                count = dax_map_atomic(bdev, &dax);
                if (count < 0)
                        return count;
                sz = min_t(long, count, SZ_128K);
                clear_pmem(dax.addr, sz);
                dax.size -= sz;
                dax.sector += sz / 512;
                dax_unmap_atomic(bdev, &dax);
                cond_resched();
        } while (dax.size);

        wmb_pmem();
        return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
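
/*
 * Example (sketch): a filesystem that has just allocated blocks for a DAX
 * file can zero them with dax_clear_blocks() before exposing them.  The
 * names "block" and "nr_blocks" below are hypothetical and only illustrate
 * that @block is a filesystem block number and the size is in bytes:
 *
 *	err = dax_clear_blocks(inode, block, nr_blocks << inode->i_blkbits);
 *	if (err)
 *		return err;
 */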

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
                loff_t pos, loff_t end)
{
        loff_t final = end - pos + first; /* The final byte of the buffer */

        if (first > 0)
                clear_pmem(addr, first);
        if (final < size)
                clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
        return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
        return bh->b_state != 0;
}


static sector_t to_sector(const struct buffer_head *bh,
                const struct inode *inode)
{
        sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

        return sector;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                      loff_t start, loff_t end, get_block_t get_block,
                      struct buffer_head *bh)
{
        loff_t pos = start, max = start, bh_max = start;
        bool hole = false, need_wmb = false;
        struct block_device *bdev = NULL;
        int rw = iov_iter_rw(iter), rc;
        long map_len = 0;
        struct blk_dax_ctl dax = {
                .addr = (void __pmem *) ERR_PTR(-EIO),
        };

        if (rw == READ)
                end = min(end, i_size_read(inode));

        while (pos < end) {
                size_t len;
                if (pos == max) {
                        unsigned blkbits = inode->i_blkbits;
                        long page = pos >> PAGE_SHIFT;
                        sector_t block = page << (PAGE_SHIFT - blkbits);
                        unsigned first = pos - (block << blkbits);
                        long size;

                        if (pos == bh_max) {
                                bh->b_size = PAGE_ALIGN(end - pos);
                                bh->b_state = 0;
                                rc = get_block(inode, block, bh, rw == WRITE);
                                if (rc)
                                        break;
                                if (!buffer_size_valid(bh))
                                        bh->b_size = 1 << blkbits;
                                bh_max = pos - first + bh->b_size;
                                bdev = bh->b_bdev;
                        } else {
                                unsigned done = bh->b_size -
                                                (bh_max - (pos - first));
                                bh->b_blocknr += done >> blkbits;
                                bh->b_size -= done;
                        }

                        hole = rw == READ && !buffer_written(bh);
                        if (hole) {
                                size = bh->b_size - first;
                        } else {
                                dax_unmap_atomic(bdev, &dax);
                                dax.sector = to_sector(bh, inode);
                                dax.size = bh->b_size;
                                map_len = dax_map_atomic(bdev, &dax);
                                if (map_len < 0) {
                                        rc = map_len;
                                        break;
                                }
                                if (buffer_unwritten(bh) || buffer_new(bh)) {
                                        dax_new_buf(dax.addr, map_len, first,
                                                        pos, end);
                                        need_wmb = true;
                                }
                                dax.addr += first;
                                size = map_len - first;
                        }
                        max = min(pos + size, end);
                }

                if (iov_iter_rw(iter) == WRITE) {
                        len = copy_from_iter_pmem(dax.addr, max - pos, iter);
                        need_wmb = true;
                } else if (!hole)
                        len = copy_to_iter((void __force *) dax.addr, max - pos,
                                        iter);
                else
                        len = iov_iter_zero(max - pos, iter);

                if (!len) {
                        rc = -EFAULT;
                        break;
                }

                pos += len;
                if (!IS_ERR(dax.addr))
                        dax.addr += len;
        }

        if (need_wmb)
                wmb_pmem();
        dax_unmap_atomic(bdev, &dax);

        return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
                  struct iov_iter *iter, loff_t pos, get_block_t get_block,
                  dio_iodone_t end_io, int flags)
{
        struct buffer_head bh;
        ssize_t retval = -EINVAL;
        loff_t end = pos + iov_iter_count(iter);

        memset(&bh, 0, sizeof(bh));

        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
                struct address_space *mapping = inode->i_mapping;
                inode_lock(inode);
                retval = filemap_write_and_wait_range(mapping, pos, end - 1);
                if (retval) {
                        inode_unlock(inode);
                        goto out;
                }
        }

        /* Protects against truncate */
        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_begin(inode);

        retval = dax_io(inode, iter, pos, end, get_block, &bh);

        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                inode_unlock(inode);

        if ((retval > 0) && end_io)
                end_io(iocb, pos, retval, bh.b_private);

        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(inode);
 out:
        return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
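
/*
 * Example (sketch): a filesystem's ->direct_IO method might dispatch to
 * dax_do_io() for DAX inodes.  my_fs_direct_IO() and my_fs_get_block() are
 * hypothetical names; the flags mirror what a do_blockdev_direct_IO()
 * caller would pass, and the non-DAX path is elided:
 *
 *	static ssize_t my_fs_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter, offset,
 *					my_fs_get_block, NULL, DIO_LOCKING);
 *		...
 *	}
 */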

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
                                                        struct vm_fault *vmf)
{
        unsigned long size;
        struct inode *inode = mapping->host;
        if (!page)
                page = find_or_create_page(mapping, vmf->pgoff,
                                                GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return VM_FAULT_OOM;
        /* Recheck i_size under page lock to avoid truncate race */
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size) {
                unlock_page(page);
                page_cache_release(page);
                return VM_FAULT_SIGBUS;
        }

        vmf->page = page;
        return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct inode *inode,
                struct buffer_head *bh, unsigned long vaddr)
{
        struct blk_dax_ctl dax = {
                .sector = to_sector(bh, inode),
                .size = bh->b_size,
        };
        struct block_device *bdev = bh->b_bdev;
        void *vto;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        vto = kmap_atomic(to);
        copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
        kunmap_atomic(vto);
        dax_unmap_atomic(bdev, &dax);
        return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
                        struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        struct address_space *mapping = inode->i_mapping;
        struct block_device *bdev = bh->b_bdev;
        struct blk_dax_ctl dax = {
                .sector = to_sector(bh, inode),
                .size = bh->b_size,
        };
        pgoff_t size;
        int error;

        i_mmap_lock_read(mapping);

        /*
         * Check truncate didn't happen while we were allocating a block.
         * If it did, this block may or may not be still allocated to the
         * file.  We can't tell the filesystem to free it because we can't
         * take i_mutex here.  In the worst case, the file still has blocks
         * allocated past the end of the file.
         */
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (unlikely(vmf->pgoff >= size)) {
                error = -EIO;
                goto out;
        }

        if (dax_map_atomic(bdev, &dax) < 0) {
                error = PTR_ERR(dax.addr);
                goto out;
        }

        if (buffer_unwritten(bh) || buffer_new(bh)) {
                clear_pmem(dax.addr, PAGE_SIZE);
                wmb_pmem();
        }
        dax_unmap_atomic(bdev, &dax);

        error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
        i_mmap_unlock_read(mapping);

        return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        get_block_t get_block, dax_iodone_t complete_unwritten)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct page *page;
        struct buffer_head bh;
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        unsigned blkbits = inode->i_blkbits;
        sector_t block;
        pgoff_t size;
        int error;
        int major = 0;

        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                return VM_FAULT_SIGBUS;

        memset(&bh, 0, sizeof(bh));
        block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
        bh.b_size = PAGE_SIZE;

 repeat:
        page = find_get_page(mapping, vmf->pgoff);
        if (page) {
                if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
                        page_cache_release(page);
                        return VM_FAULT_RETRY;
                }
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        page_cache_release(page);
                        goto repeat;
                }
                size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if (unlikely(vmf->pgoff >= size)) {
                        /*
                         * We have a struct page covering a hole in the file
                         * from a read fault and we've raced with a truncate
                         */
                        error = -EIO;
                        goto unlock_page;
                }
        }

        error = get_block(inode, block, &bh, 0);
        if (!error && (bh.b_size < PAGE_SIZE))
                error = -EIO;		/* fs corruption? */
        if (error)
                goto unlock_page;

        if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
                if (vmf->flags & FAULT_FLAG_WRITE) {
                        error = get_block(inode, block, &bh, 1);
                        count_vm_event(PGMAJFAULT);
                        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                        if (!error && (bh.b_size < PAGE_SIZE))
                                error = -EIO;
                        if (error)
                                goto unlock_page;
                } else {
                        return dax_load_hole(mapping, page, vmf);
                }
        }

        if (vmf->cow_page) {
                struct page *new_page = vmf->cow_page;
                if (buffer_written(&bh))
                        error = copy_user_bh(new_page, inode, &bh, vaddr);
                else
                        clear_user_highpage(new_page, vaddr);
                if (error)
                        goto unlock_page;
                vmf->page = page;
                if (!page) {
                        i_mmap_lock_read(mapping);
                        /* Check we didn't race with truncate */
                        size = (i_size_read(inode) + PAGE_SIZE - 1) >>
                                                                PAGE_SHIFT;
                        if (vmf->pgoff >= size) {
                                i_mmap_unlock_read(mapping);
                                error = -EIO;
                                goto out;
                        }
                }
                return VM_FAULT_LOCKED;
        }

        /* Check we didn't race with a read fault installing a new page */
        if (!page && major)
                page = find_lock_page(mapping, vmf->pgoff);

        if (page) {
                unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
                                                        PAGE_CACHE_SIZE, 0);
                delete_from_page_cache(page);
                unlock_page(page);
                page_cache_release(page);
        }

        /*
         * If we successfully insert the new mapping over an unwritten extent,
         * we need to ensure we convert the unwritten extent. If there is an
         * error inserting the mapping, the filesystem needs to leave it as
         * unwritten to prevent exposure of the stale underlying data to
         * userspace, but we still need to call the completion function so
         * the private resources on the mapping buffer can be released. We
         * indicate what the callback should do via the uptodate variable, same
         * as for normal BH based IO completions.
         */
        error = dax_insert_mapping(inode, &bh, vma, vmf);
        if (buffer_unwritten(&bh)) {
                if (complete_unwritten)
                        complete_unwritten(&bh, !error);
                else
                        WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
        }

 out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
        /* -EBUSY is fine, somebody else faulted on the same PTE */
        if ((error < 0) && (error != -EBUSY))
                return VM_FAULT_SIGBUS | major;
        return VM_FAULT_NOPAGE | major;

 unlock_page:
        if (page) {
                unlock_page(page);
                page_cache_release(page);
        }
        goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
              get_block_t get_block, dax_iodone_t complete_unwritten)
{
        int result;
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        if (vmf->flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
        }
        result = __dax_fault(vma, vmf, get_block, complete_unwritten);
        if (vmf->flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(sb);

        return result;
}
EXPORT_SYMBOL_GPL(dax_fault);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
                const char *reason, const char *fn)
{
        if (bh) {
                char bname[BDEVNAME_SIZE];
                bdevname(bh->b_bdev, bname);
                pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
                        "length %zd fallback: %s\n", fn, current->comm,
                        address, bname, bh->b_state, (u64)bh->b_blocknr,
                        bh->b_size, reason);
        } else {
                pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
                        current->comm, address, reason);
        }
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmd, unsigned int flags, get_block_t get_block,
                dax_iodone_t complete_unwritten)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct buffer_head bh;
        unsigned blkbits = inode->i_blkbits;
        unsigned long pmd_addr = address & PMD_MASK;
        bool write = flags & FAULT_FLAG_WRITE;
        struct block_device *bdev;
        pgoff_t size, pgoff;
        sector_t block;
        int result = 0;

        /* dax pmd mappings require pfn_t_devmap() */
        if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
                return VM_FAULT_FALLBACK;

        /* Fall back to PTEs if we're going to COW */
        if (write && !(vma->vm_flags & VM_SHARED)) {
                split_huge_pmd(vma, pmd, address);
                dax_pmd_dbg(NULL, address, "cow write");
                return VM_FAULT_FALLBACK;
        }
        /* If the PMD would extend outside the VMA */
        if (pmd_addr < vma->vm_start) {
                dax_pmd_dbg(NULL, address, "vma start unaligned");
                return VM_FAULT_FALLBACK;
        }
        if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
                dax_pmd_dbg(NULL, address, "vma end unaligned");
                return VM_FAULT_FALLBACK;
        }

        pgoff = linear_page_index(vma, pmd_addr);
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (pgoff >= size)
                return VM_FAULT_SIGBUS;
        /* If the PMD would cover blocks out of the file */
        if ((pgoff | PG_PMD_COLOUR) >= size) {
                dax_pmd_dbg(NULL, address,
                                "offset + huge page size > file size");
                return VM_FAULT_FALLBACK;
        }

        memset(&bh, 0, sizeof(bh));
        block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

        bh.b_size = PMD_SIZE;
        if (get_block(inode, block, &bh, write) != 0)
                return VM_FAULT_SIGBUS;
        bdev = bh.b_bdev;
        i_mmap_lock_read(mapping);

        /*
         * If the filesystem isn't willing to tell us the length of a hole,
         * just fall back to PTEs.  Calling get_block 512 times in a loop
         * would be silly.
         */
        if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
                dax_pmd_dbg(&bh, address, "allocated block too small");
                goto fallback;
        }

        /*
         * If we allocated new storage, make sure no process has any
         * zero pages covering this hole
         */
        if (buffer_new(&bh)) {
                i_mmap_unlock_read(mapping);
                unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
                i_mmap_lock_read(mapping);
        }

        /*
         * If a truncate happened while we were allocating blocks, we may
         * leave blocks allocated to the file that are beyond EOF.  We can't
         * take i_mutex here, so just leave them hanging; they'll be freed
         * when the file is deleted.
         */
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (pgoff >= size) {
                result = VM_FAULT_SIGBUS;
                goto out;
        }
        if ((pgoff | PG_PMD_COLOUR) >= size) {
                dax_pmd_dbg(&bh, address, "pgoff unaligned");
                goto fallback;
        }

        if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
                spinlock_t *ptl;
                pmd_t entry;
                struct page *zero_page = get_huge_zero_page();

                if (unlikely(!zero_page)) {
                        dax_pmd_dbg(&bh, address, "no zero page");
                        goto fallback;
                }

                ptl = pmd_lock(vma->vm_mm, pmd);
                if (!pmd_none(*pmd)) {
                        spin_unlock(ptl);
                        dax_pmd_dbg(&bh, address, "pmd already present");
                        goto fallback;
                }

                dev_dbg(part_to_dev(bdev->bd_part),
                                "%s: %s addr: %lx pfn: <zero> sect: %llx\n",
                                __func__, current->comm, address,
                                (unsigned long long) to_sector(&bh, inode));

                entry = mk_pmd(zero_page, vma->vm_page_prot);
                entry = pmd_mkhuge(entry);
                set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
                result = VM_FAULT_NOPAGE;
                spin_unlock(ptl);
        } else {
                struct blk_dax_ctl dax = {
                        .sector = to_sector(&bh, inode),
                        .size = PMD_SIZE,
                };
                long length = dax_map_atomic(bdev, &dax);

                if (length < 0) {
                        result = VM_FAULT_SIGBUS;
                        goto out;
                }
                if (length < PMD_SIZE) {
                        dax_pmd_dbg(&bh, address, "dax-length too small");
                        dax_unmap_atomic(bdev, &dax);
                        goto fallback;
                }
                if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
                        dax_pmd_dbg(&bh, address, "pfn unaligned");
                        dax_unmap_atomic(bdev, &dax);
                        goto fallback;
                }

                if (!pfn_t_devmap(dax.pfn)) {
                        dax_unmap_atomic(bdev, &dax);
                        dax_pmd_dbg(&bh, address, "pfn not in memmap");
                        goto fallback;
                }

                if (buffer_unwritten(&bh) || buffer_new(&bh)) {
                        clear_pmem(dax.addr, PMD_SIZE);
                        wmb_pmem();
                        count_vm_event(PGMAJFAULT);
                        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                        result |= VM_FAULT_MAJOR;
                }
                dax_unmap_atomic(bdev, &dax);

                dev_dbg(part_to_dev(bdev->bd_part),
                                "%s: %s addr: %lx pfn: %lx sect: %llx\n",
                                __func__, current->comm, address,
                                pfn_t_to_pfn(dax.pfn),
                                (unsigned long long) dax.sector);
                result |= vmf_insert_pfn_pmd(vma, address, pmd,
                                dax.pfn, write);
        }

 out:
        i_mmap_unlock_read(mapping);

        if (buffer_unwritten(&bh))
                complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

        return result;

 fallback:
        count_vm_event(THP_FAULT_FALLBACK);
        result = VM_FAULT_FALLBACK;
        goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                        pmd_t *pmd, unsigned int flags, get_block_t get_block,
                        dax_iodone_t complete_unwritten)
{
        int result;
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        if (flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
        }
        result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
                                complete_unwritten);
        if (flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(sb);

        return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 *
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        sb_start_pagefault(sb);
        file_update_time(vma->vm_file);
        sb_end_pagefault(sb);
        return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
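
/*
 * Example (sketch): a filesystem typically exposes the fault helpers above
 * through a vm_operations_struct installed by its ->mmap method.  The
 * my_fs_* names are hypothetical; my_fs_dax_fault() simply forwards to
 * dax_fault() with the filesystem's get_block callback, and a similar
 * wrapper around dax_pmd_fault() fills .pmd_fault when
 * CONFIG_TRANSPARENT_HUGEPAGE is enabled:
 *
 *	static int my_fs_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, my_fs_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct my_fs_dax_vm_ops = {
 *		.fault		= my_fs_dax_fault,
 *		.pmd_fault	= my_fs_dax_pmd_fault,
 *		.page_mkwrite	= my_fs_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */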

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
                                                        get_block_t get_block)
{
        struct buffer_head bh;
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        int err;

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;
        BUG_ON((offset + length) > PAGE_CACHE_SIZE);

        memset(&bh, 0, sizeof(bh));
        bh.b_size = PAGE_CACHE_SIZE;
        err = get_block(inode, index, &bh, 0);
        if (err < 0)
                return err;
        if (buffer_written(&bh)) {
                struct block_device *bdev = bh.b_bdev;
                struct blk_dax_ctl dax = {
                        .sector = to_sector(&bh, inode),
                        .size = PAGE_CACHE_SIZE,
                };

                if (dax_map_atomic(bdev, &dax) < 0)
                        return PTR_ERR(dax.addr);
                clear_pmem(dax.addr + offset, length);
                wmb_pmem();
                dax_unmap_atomic(bdev, &dax);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
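
/*
 * Example (sketch): a hole punch that leaves a partial page at either end
 * of the punched range might zero that edge with dax_zero_page_range()
 * before freeing the blocks in between.  my_fs_get_block, "start" and
 * "partial_len" are hypothetical, and the range must stay within a single
 * page as enforced by the BUG_ON() above:
 *
 *	err = dax_zero_page_range(inode, start, partial_len, my_fs_get_block);
 */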

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
        unsigned length = PAGE_CACHE_ALIGN(from) - from;
        return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
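
/*
 * Example (sketch): a filesystem's truncate path might call this for DAX
 * inodes where it would otherwise call block_truncate_page()
 * (my_fs_get_block is hypothetical):
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize, my_fs_get_block);
 *	else
 *		error = block_truncate_page(inode->i_mapping, newsize,
 *					    my_fs_get_block);
 */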