/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

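/*
 * dax_clear_blocks() zeroes @size bytes of storage, starting at @block, on
 * the block device backing @inode, going through the direct-access mapping
 * rather than the page cache.  The stores issued by clear_pmem() are made
 * durable by the single wmb_pmem() once the whole range has been cleared.
 */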
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem((void __pmem *)addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
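
/*
 * Illustrative usage (not part of this file): a filesystem that has just
 * allocated blocks for a DAX file can zero them with dax_clear_blocks()
 * before exposing them, instead of dirtying zeroed pages in the page
 * cache.  A minimal sketch, where "first_block" and "nr_blocks" are
 * hypothetical names for the fresh allocation:
 *
 *	if (IS_DAX(inode)) {
 *		err = dax_clear_blocks(inode, first_block,
 *				       nr_blocks << inode->i_blkbits);
 *		if (err)
 *			goto cleanup;
 *	}
 */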

static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos,
			loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem((void __pmem *)addr, first);
	if (final < size)
		clear_pmem((void __pmem *)addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size. To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			sector_t block = pos >> blkbits;
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
							end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem((void __pmem *)addr,
					max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter(addr, max - pos, iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len)
			break;

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
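
/*
 * Illustrative usage (not part of this file): a filesystem typically calls
 * dax_do_io() from its ->direct_IO() address_space operation when the inode
 * has the DAX flag set.  A minimal sketch, assuming a hypothetical
 * example_get_block() that follows the usual get_block_t contract:
 *
 *	static ssize_t example_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, offset,
 *				 example_get_block, NULL, DIO_LOCKING);
 *	}
 *
 * Passing NULL for @end_io is fine when the filesystem has no completion
 * work to do; DIO_LOCKING asks dax_do_io() to take i_mutex itself for reads.
 */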

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
			 struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void *vfrom, *vto;
	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem((void __pmem *)addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	extents to written so the data written to them is exposed.  This is
 *	required by write faults for filesystems that will return unwritten
 *	extent mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, then it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !error);

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	extents to written, so the data written to them is exposed.  May be
 *	NULL if the filesystem never returns unwritten extents.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
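
/*
 * Illustrative wiring (not part of this file): a filesystem hooks
 * dax_fault() up as the ->fault handler of the vm_operations_struct it
 * installs from ->mmap() on DAX files.  A minimal sketch, assuming a
 * hypothetical example_get_block() and a filesystem that never returns
 * unwritten extents (so the completion callback may be NULL):
 *
 *	static int example_dax_fault(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, example_get_block, NULL);
 *	}
 *
 * The ->mmap() method must also set VM_MIXEDMAP on the VMA, since
 * dax_insert_mapping() installs the PFN with vm_insert_mixed().
 */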

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
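
/*
 * Illustrative wiring (not part of this file), continuing the hypothetical
 * example above: dax_pfn_mkwrite() sits alongside the fault handlers in the
 * same vm_operations_struct, so that the first write through a read-only
 * DAX mapping updates the file times and respects a frozen filesystem.
 * A sketch:
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault		= example_dax_fault,
 *		.page_mkwrite	= example_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */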

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file the range belongs to
 * @from: The file offset at which to start zeroing
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
			get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem((void __pmem *)addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
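
/*
 * Illustrative usage (not part of this file): for a hole punch, a
 * filesystem zeroes the partial pages at either end of the punched range
 * and leaves whole pages to its block-deallocation code.  A sketch,
 * assuming a hypothetical example_get_block() and that "start" and
 * "partial_len" stay within a single page as required by the BUG_ON above:
 *
 *	err = dax_zero_page_range(inode, start, partial_len,
 *				  example_get_block);
 */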

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
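
/*
 * Illustrative usage (not part of this file): a filesystem calls
 * dax_truncate_page() from its truncate path so the tail of the final
 * page is zeroed before the blocks past the new size are freed.  A
 * minimal sketch, assuming a hypothetical example_get_block() and an
 * example_free_blocks() helper:
 *
 *	static int example_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		int error;
 *
 *		if (IS_DAX(inode))
 *			error = dax_truncate_page(inode, newsize,
 *						  example_get_block);
 *		else
 *			error = block_truncate_page(inode->i_mapping,
 *						    newsize, example_get_block);
 *		if (error)
 *			return error;
 *		truncate_setsize(inode, newsize);
 *		example_free_blocks(inode);
 *		return 0;
 *	}
 */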