/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

/*
 * We use the lowest available bit in an exceptional entry for locking, and
 * the other two bits to determine the entry type. In total, 3 special bits.
 */
#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
		RADIX_TREE_EXCEPTIONAL_ENTRY))
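
/*
 * Illustrative example only (assuming RADIX_TREE_EXCEPTIONAL_SHIFT is 2): a
 * PTE entry for sector 8 would be stored as
 * (8 << RADIX_DAX_SHIFT) | RADIX_DAX_PTE | RADIX_TREE_EXCEPTIONAL_ENTRY,
 * and RADIX_DAX_SECTOR() recovers the sector by shifting the special bits
 * back out.
 */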

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
					      pgoff_t index)
{
	unsigned long hash = hash_long((unsigned long)mapping ^ index,
				       DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

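/*
 * Map the sector/size described by @dax to a kernel virtual address and pfn
 * via bdev_direct_access(), taking a reference on the request queue that
 * dax_unmap_atomic() drops again.  On error, dax->addr is left as an
 * ERR_PTR() so that dax_unmap_atomic() can be called unconditionally.
 */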
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

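/*
 * Read one page worth of data surrounding sector @n (aligned down to a page
 * boundary) from the DAX device into a freshly allocated page.  Returns the
 * page or an ERR_PTR() on allocation or mapping failure.
 */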
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}


static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}

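/*
 * The core I/O loop: walk [start, end), calling get_block() to map file
 * offsets to device blocks, map each extent with dax_map_atomic() and copy
 * directly between the iterator and persistent memory.  Reads from holes are
 * zero-filled; writes are flushed with wmb_pmem() before returning.
 */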
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};
	unsigned blkbits = inode->i_blkbits;
	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
								>> blkbits;

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
				/*
				 * We allow uninitialized buffers for writes
				 * beyond EOF as those cannot race with faults
				 */
				WARN_ON_ONCE(
					(buffer_new(bh) && block < file_blks) ||
					(rw == WRITE && buffer_unwritten(bh)));
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				dax.addr += first;
				size = map_len - first;
			}
			/*
			 * pos + size is one past the last offset for IO,
			 * so pos + size can overflow loff_t at extreme offsets.
			 * Cast to u64 to catch this and get the true minimum.
			 */
			max = min_t(u64, pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t pos = iocb->ki_pos;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_lock(inode);

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if (end_io) {
		int err;

		err = end_io(iocb, pos, retval, bh.b_private);
		if (err)
			retval = err;
	}

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
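
/*
 * A minimal sketch (illustrative only, loosely modelled on ext2) of how a
 * filesystem's ->direct_IO method might call dax_do_io().  The name
 * "example_get_block" is a placeholder for the filesystem's own get_block_t:
 *
 *	static ssize_t example_direct_IO(struct kiocb *iocb,
 *					 struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter,
 *					 example_get_block, NULL, DIO_LOCKING);
 *		return blockdev_direct_IO(iocb, inode, iter,
 *					  example_get_block);
 *	}
 */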

/*
 * DAX radix tree locking
 */
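/*
 * Exceptional (DAX) entries in the radix tree are locked by setting the
 * RADIX_DAX_ENTRY_LOCK bit in the entry itself.  Tasks that find an entry
 * locked sleep on one of the hashed waitqueues above; the key below lets
 * wake_exceptional_entry_func() wake only the waiters for the right
 * (mapping, index) pair, so unrelated entries can share a waitqueue.
 */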
struct exceptional_entry_key {
	struct address_space *mapping;
	unsigned long index;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->index != ewait->key.index)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it. The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it again.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *ret, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;
	ewait.key.mapping = mapping;
	ewait.key.index = index;

	for (;;) {
		ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!ret || !radix_tree_exceptional_entry(ret) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return ret;
		}
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

/*
 * Find the radix tree entry at the given index. If it points to a page,
 * return with the page locked. If it points to an exceptional entry, return
 * with the radix tree entry locked. If the radix tree doesn't contain the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *ret, **slot;
restart:
	spin_lock_irq(&mapping->tree_lock);
	ret = get_unlocked_mapping_entry(mapping, index, &slot);
	/* No entry for given index? Make sure radix tree is big enough. */
	if (!ret) {
		int err;

		spin_unlock_irq(&mapping->tree_lock);
		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err)
			return ERR_PTR(err);
		ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
			       RADIX_DAX_ENTRY_LOCK);
		spin_lock_irq(&mapping->tree_lock);
		err = radix_tree_insert(&mapping->page_tree, index, ret);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/* Someone already created the entry? */
			if (err == -EEXIST)
				goto restart;
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return ret;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(ret)) {
		struct page *page = ret;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	ret = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

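/*
 * Wake up tasks waiting on the radix tree entry at @index.  @wake_all selects
 * between waking a single exclusive waiter and waking every waiter; the
 * latter is used when the entry is deleted and all waiters must re-lookup.
 */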
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
				   pgoff_t index, bool wake_all)
{
	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq)) {
		struct exceptional_entry_key key;

		key.mapping = mapping;
		key.index = index;
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
	}
}

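/*
 * Clear the lock bit of the exceptional entry at @index and wake up one task
 * waiting for it.
 */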
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *ret, **slot;

	spin_lock_irq(&mapping->tree_lock);
	ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, false);
}

/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	radix_tree_delete(&mapping->page_tree, index);
	mapping->nrexceptional--;
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, true);

	return 1;
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct page *page;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(entry)) {
		vmf->page = entry;
		return VM_FAULT_LOCKED;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page) {
		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
		return VM_FAULT_OOM;
	}
	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

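/*
 * Round a page index down to the first index covered by the PMD-sized
 * mapping that would contain it.
 */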
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))

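/*
 * Replace the entry at @vmf->pgoff (either a hole page or an existing
 * exceptional entry) with a locked exceptional entry for @sector.  For write
 * faults the entry is also tagged PAGECACHE_TAG_DIRTY so that
 * dax_writeback_mapping_range() can find it later.
 */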
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
			RADIX_DAX_ENTRY_LOCK);
	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = radix_tree_insert(page_tree, index, new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else {
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
		WARN_ON_ONCE(ret != entry);
		radix_tree_replace_slot(slot, new_entry);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

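/*
 * Flush a single radix tree entry to the persistent domain: map the backing
 * sectors, write back the CPU cache over that range with wb_cache_pmem() and
 * clear the entry's PAGECACHE_TAG_TOWRITE tag.
 */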
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

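/*
 * Look up the pfn backing the block in @bh, record the sector in the radix
 * tree via dax_insert_mapping_entry() and install the pfn into the page
 * tables with vm_insert_mixed().
 */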
static int dax_insert_mapping(struct address_space *mapping,
			struct buffer_head *bh, void **entryp,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, mapping->host),
		.size = bh->b_size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	void *entry;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

	entry = grab_mapping_entry(mapping, vmf->pgoff);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_entry;

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_entry;
		if (!radix_tree_exceptional_entry(entry)) {
			vmf->page = entry;
			return VM_FAULT_LOCKED;
		}
		vmf->entry = entry;
		return VM_FAULT_DAX_LOCKED;
	}

	if (!buffer_mapped(&bh)) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_entry;
		} else {
			return dax_load_hole(mapping, entry, vmf);
		}
	}

	/* Filesystem should not return unwritten buffers to us! */
	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	error = dax_insert_mapping(mapping, &bh, &entry, vma, vmf);
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_fault);
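
/*
 * A minimal sketch (illustrative only) of how a filesystem might wire
 * dax_fault() into its vm_operations_struct.  "example_get_block" and any
 * locking around the call are placeholders for the filesystem's own
 * get_block_t and its fault-vs-truncate synchronisation:
 *
 *	static int example_dax_fault(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, example_get_block);
 *	}
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault		= example_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */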

#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason) __dax_dbg(bh, address, reason, "dax_pmd")

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The virtual address where the fault occurred
 * @pmd: The PMD entry to install the mapping into
 * @flags: Fault flags; FAULT_FLAG_WRITE indicates a write fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int result = 0;
	bool alloc = false;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;

	if (get_block(inode, block, &bh, 0) != 0)
		return VM_FAULT_SIGBUS;

	if (!buffer_mapped(&bh) && write) {
		if (get_block(inode, block, &bh, 1) != 0)
			return VM_FAULT_SIGBUS;
		alloc = true;
		WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	}

	bdev = bh.b_bdev;

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		return VM_FAULT_FALLBACK;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (alloc) {
		loff_t lstart = pgoff << PAGE_SHIFT;
		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

		truncate_pagecache_range(inode, lstart, lend);
	}

	if (!write && !buffer_mapped(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			dax_pmd_dbg(&bh, address, "dax-error fallback");
			goto fallback;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}
		dax_unmap_atomic(bdev, &dax);

		/*
		 * For PTE faults we insert a radix tree entry for reads, and
		 * leave it clean.  Then on the first write we dirty the radix
		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
		 * call into get_block() to translate the pgoff to a sector in
		 * order to be able to create a new radix tree entry.
		 *
		 * The PMD path doesn't have an equivalent to
		 * dax_pfn_mkwrite(), though, so for a read followed by a
		 * write we traverse all the way through dax_pmd_fault()
		 * twice.  This means we can just skip inserting a radix tree
		 * entry completely on the initial read and just wait until
		 * the write to insert a dirty entry.
		 */
		if (write) {
			/*
			 * We should insert radix-tree entry and dirty it here.
			 * For now this is broken...
			 */
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	put_unlocked_mapping_entry(mapping, index, entry);
out:
	spin_unlock_irq(&mapping->tree_lock);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

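/*
 * Return true when both @offset and @length are multiples of the device's
 * logical block size, in which case the zeroing below can be handed off to
 * blkdev_issue_zeroout() instead of touching pmem directly.
 */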
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0 || !buffer_written(&bh))
		return err;

	return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
			offset, length);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);