/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
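
/*
 * Example (illustrative sketch only, not part of this file): a
 * filesystem typically calls dax_clear_blocks() from its get_block()
 * path right after allocating a new block, so the block is zeroed
 * before it becomes visible to other threads.  All myfs_* names
 * below are hypothetical.
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *			struct buffer_head *bh, int create)
 *	{
 *		sector_t blocknr;
 *		int err;
 *
 *		err = myfs_alloc_block(inode, iblock, &blocknr);
 *		if (err)
 *			return err;
 *		if (IS_DAX(inode)) {
 *			err = dax_clear_blocks(inode, blocknr,
 *						1 << inode->i_blkbits);
 *			if (err)
 *				return err;
 *		}
 *		map_bh(bh, inode->i_sb, blocknr);
 *		set_buffer_new(bh);
 *		return 0;
 *	}
 */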

static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first;	/* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		loff_t start, loff_t end, get_block_t get_block,
		struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			sector_t block = pos >> blkbits;
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
									end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len)
			break;

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		struct iov_iter *iter, loff_t pos, get_block_t get_block,
		dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
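
/*
 * Example (illustrative sketch only, not part of this file): a
 * filesystem's ->direct_IO method can route DAX inodes through
 * dax_do_io() and fall back to the regular direct I/O path otherwise,
 * much as ext2 does.  The myfs_* names are hypothetical.
 *
 *	static ssize_t myfs_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter, offset,
 *					myfs_get_block, NULL, DIO_LOCKING);
 *		return blockdev_direct_IO(iocb, inode, iter, offset,
 *					myfs_get_block);
 *	}
 */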

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	extents to written after a mapping has been inserted (may be NULL
 *	if @get_block never returns unwritten buffers)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.  __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent.  If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released.  We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !error);

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);
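
/*
 * Example (illustrative sketch only, not part of this file): a
 * filesystem that must hold its own mmap lock across the fault, in
 * the style of XFS, calls __dax_fault() directly and does the
 * freeze-protection and timestamp work that dax_fault() would
 * otherwise do.  The myfs_* names are hypothetical.
 *
 *	static int myfs_filemap_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *		int ret;
 *
 *		if (vmf->flags & FAULT_FLAG_WRITE) {
 *			sb_start_pagefault(inode->i_sb);
 *			file_update_time(vma->vm_file);
 *		}
 *		myfs_mmap_lock_shared(inode);
 *		ret = __dax_fault(vma, vmf, myfs_get_block,
 *					myfs_end_io_unwritten);
 *		myfs_mmap_unlock_shared(inode);
 *		if (vmf->flags & FAULT_FLAG_WRITE)
 *			sb_end_pagefault(inode->i_sb);
 *
 *		return ret;
 *	}
 */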

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	extents to written (may be NULL if @get_block never returns
 *	unwritten buffers)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
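
/*
 * Example (illustrative sketch only, not part of this file): a
 * filesystem with no extra locking requirements can wrap dax_fault()
 * directly.  Passing NULL for @complete_unwritten is only safe if
 * the filesystem's get_block never returns unwritten buffers.  The
 * myfs_* names are hypothetical.
 *
 *	static int myfs_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, myfs_get_block, NULL);
 *	}
 */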

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 *
 * There is no page to lock or dirty here; we only need to update the
 * file's timestamps, under pagefault freeze protection, before the
 * write is allowed to proceed.
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
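
/*
 * Example (illustrative sketch only, not part of this file): wiring
 * the DAX helpers into a vm_operations_struct from ->mmap, reusing
 * myfs_dax_fault from the sketch above.  VM_MIXEDMAP is required
 * because dax_insert_mapping() installs PTEs with vm_insert_mixed().
 *
 *	static const struct vm_operations_struct myfs_dax_vm_ops = {
 *		.fault		= myfs_dax_fault,
 *		.page_mkwrite	= myfs_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 *	static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		if (!IS_DAX(file_inode(file)))
 *			return generic_file_mmap(file, vma);
 *
 *		file_accessed(file);
 *		vma->vm_ops = &myfs_dax_vm_ops;
 *		vma->vm_flags |= VM_MIXEDMAP;
 *		return 0;
 *	}
 */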

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
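
/*
 * Example (illustrative sketch only, not part of this file): a
 * hole-punch implementation zeroes the partial page at the start of
 * the punched range before freeing the whole blocks in between; the
 * partial page at the end of the range is handled the same way.
 * Note the zeroed span must not cross a page boundary.  The myfs_*
 * names are hypothetical.
 *
 *	static int myfs_punch_hole_start(struct inode *inode, loff_t offset,
 *			loff_t len)
 *	{
 *		unsigned partial = offset & (PAGE_CACHE_SIZE - 1);
 *		unsigned head = min_t(loff_t, len,
 *					PAGE_CACHE_SIZE - partial);
 *
 *		if (!partial)
 *			return 0;
 *		return dax_zero_page_range(inode, offset, head,
 *						myfs_get_block);
 *	}
 */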

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
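
/*
 * Example (illustrative sketch only, not part of this file): a
 * truncate path calls dax_truncate_page() where a non-DAX filesystem
 * would call block_truncate_page(), then shrinks the file as usual.
 * The myfs_* names are hypothetical.
 *
 *	static int myfs_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		int error;
 *
 *		if (IS_DAX(inode))
 *			error = dax_truncate_page(inode, newsize,
 *							myfs_get_block);
 *		else
 *			error = block_truncate_page(inode->i_mapping,
 *						newsize, myfs_get_block);
 *		if (error)
 *			return error;
 *		truncate_setsize(inode, newsize);
 *		myfs_truncate_blocks(inode, newsize);
 *		return 0;
 *	}
 */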