/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

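/*
 * Zero @size bytes of the device backing @inode, starting at file block
 * @block, by writing zeroes through the block device's direct-access
 * mapping one chunk at a time, then flush them to media with wmb_pmem().
 */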
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);

static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
									end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len)
			break;

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
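
/*
 * Illustrative sketch (not part of dax.c): a filesystem's ->direct_IO
 * method typically just forwards to dax_do_io() when the inode uses DAX,
 * much as ext2 did at the time.  "myfs_get_block" is a hypothetical
 * get_block_t; DIO_LOCKING asks dax_do_io() to apply the same i_mutex
 * scheme as the blockdev direct-I/O path.
 *
 *	static ssize_t myfs_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter, offset,
 *					 myfs_get_block, NULL, DIO_LOCKING);
 *		return blockdev_direct_IO(iocb, inode, iter, offset,
 *					  myfs_get_block);
 *	}
 */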

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock;
		}
	} else {
		i_mmap_lock_write(mapping);
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock;
		} else {
			i_mmap_unlock_write(mapping);
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock;
		vmf->page = page;
		if (!page) {
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				error = -EIO;
				goto unlock;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

	if (!page)
		i_mmap_unlock_write(mapping);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	} else {
		i_mmap_unlock_write(mapping);
	}

	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed; filesystems that
 *	do not support unwritten extents should pass NULL (see __dax_fault())
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
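
/*
 * Illustrative sketch (not part of dax.c): the simplest consumer points
 * its vm_operations_struct at the locked helpers, as ext2 did.  The
 * "myfs_*" names are hypothetical; passing NULL for complete_unwritten
 * assumes @get_block never returns unwritten extents.
 *
 *	static int myfs_dax_fault(struct vm_area_struct *vma,
 *				  struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, myfs_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct myfs_dax_vm_ops = {
 *		.fault		= myfs_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */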

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
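
/*
 * Worked example (editorial, assuming the common x86-64 geometry of 4KiB
 * pages and 2MiB PMDs): PMD_SIZE >> PAGE_SHIFT is 512, so PG_PMD_COLOUR
 * is 511 (0x1ff).  A page offset or pfn is PMD-aligned exactly when
 * (value & PG_PMD_COLOUR) == 0, and (pgoff | PG_PMD_COLOUR) is the last
 * page index covered by the PMD containing pgoff.
 */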

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	i_mmap_lock_write(mapping);
	length = get_block(inode, block, &bh, write);
	if (length) {
		/* exit via the common path so i_mmap_lock is dropped */
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	sector = bh.b_blocknr << (blkbits - 9);

	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
		int i;

		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		for (i = 0; i < PTRS_PER_PMD; i++)
			clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
		wmb_pmem();
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		result |= VM_FAULT_MAJOR;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_write(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_write(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	i_mmap_unlock_write(mapping);

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD entry to fill in
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written; may be NULL only if @get_block never returns unwritten
 *	extents
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
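
/*
 * Illustrative sketch (not part of dax.c): wiring the PMD handler into a
 * vm_operations_struct alongside the PTE handler, in the style of the
 * DAX-enabled filesystems of this era; the ->mmap method also sets
 * VM_HUGEPAGE on the VMA so huge faults are attempted.  "myfs_*" names
 * are hypothetical, and passing NULL for complete_unwritten assumes
 * myfs_get_block never returns unwritten extents.
 *
 *	static int myfs_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags, myfs_get_block,
 *				     NULL);
 *	}
 *
 *	static const struct vm_operations_struct myfs_dax_vm_ops = {
 *		.fault		= myfs_dax_fault,
 *		.pmd_fault	= myfs_dax_pmd_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */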
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range is being zeroed
 * @from: The file offset at which the range starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
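
/*
 * Illustrative sketch (not part of dax.c): a hole-punch path zeroes the
 * partial page at the head of the range with dax_zero_page_range() and
 * leaves whole blocks to the filesystem's block-freeing code.  Note the
 * BUG_ON above: a single call must stay within one page.  "myfs_get_block"
 * and the surrounding helper are hypothetical.
 *
 *	static int myfs_punch_head(struct inode *inode, loff_t start,
 *				   loff_t end)
 *	{
 *		unsigned partial = start & (PAGE_CACHE_SIZE - 1);
 *
 *		if (!partial)
 *			return 0;
 *		return dax_zero_page_range(inode, start,
 *				min_t(loff_t, end - start,
 *				      PAGE_CACHE_SIZE - partial),
 *				myfs_get_block);
 *	}
 */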

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
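
/*
 * Illustrative sketch (not part of dax.c): a filesystem's truncate path
 * zeroes the tail of the new last page before shrinking i_size, much as
 * ext2 did at the time.  "myfs_get_block" is a hypothetical get_block_t
 * and the block-freeing step is elided.
 *
 *	static int myfs_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		int error;
 *
 *		if (IS_DAX(inode))
 *			error = dax_truncate_page(inode, newsize,
 *						  myfs_get_block);
 *		else
 *			error = block_truncate_page(inode->i_mapping,
 *						    newsize, myfs_get_block);
 *		if (error)
 *			return error;
 *		truncate_setsize(inode, newsize);
 *		... free blocks beyond the new EOF ...
 *		return 0;
 *	}
 */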