#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

struct iomap_ops;

/* We use the lowest available exceptional entry bit for locking */
#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)

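/*
 * Synchronous DAX I/O paths: iomap_dax_rw() is the newer iomap-based
 * read/write path, dax_do_io() the older get_block_t-based one. Both
 * copy data directly to or from the backing device, bypassing the
 * page cache.
 */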
ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops);
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
		  get_block_t, dio_iodone_t, int flags);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
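/*
 * PTE-sized fault handler for DAX files; filesystems typically call
 * this from their vm_operations_struct ->fault handler.
 */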
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
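/*
 * Helpers for the exceptional radix tree entries DAX stores in the
 * mapping; RADIX_DAX_ENTRY_LOCK above serializes fault handling
 * against operations such as truncate.
 */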
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, bool wake_all);

#ifdef CONFIG_FS_DAX
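/* Read one sector from @bdev through the direct-access (DAX) path. */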
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length);
#else
static inline struct page *read_dax_sector(struct block_device *bdev,
		sector_t n)
{
	return ERR_PTR(-ENXIO);
}
/* Shouldn't ever be called when dax is disabled. */
static inline void dax_unlock_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	BUG();
}
static inline int __dax_zero_page_range(struct block_device *bdev,
		sector_t sector, unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
#endif

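/*
 * PMD-sized (huge page) faults require transparent huge page support;
 * without CONFIG_TRANSPARENT_HUGEPAGE the stub returns VM_FAULT_FALLBACK
 * so the fault is retried at PTE granularity.
 */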
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
		unsigned int flags, get_block_t);
#else
static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned int flags, get_block_t gb)
{
	return VM_FAULT_FALLBACK;
}
#endif
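/*
 * ->pfn_mkwrite handler: invoked on write access to a read-only PFN
 * mapping, allowing the entry to be marked dirty for later writeback.
 */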
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)

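/*
 * Usage sketch (handler names hypothetical): a filesystem normally
 * routes mmap faults on DAX files through the helpers above, roughly:
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_filemap_fault,        (calls dax_fault)
 *		.pmd_fault	= foo_filemap_pmd_fault,    (calls dax_pmd_fault)
 *		.page_mkwrite	= foo_filemap_fault,        (via dax_mkwrite)
 *		.pfn_mkwrite	= foo_filemap_pfn_mkwrite,  (calls dax_pfn_mkwrite)
 *	};
 */
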
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}

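/* True if the address_space belongs to a DAX-capable inode. */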
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

struct writeback_control;
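/*
 * Flush dirty DAX entries in @mapping to the backing device; this
 * underpins fsync()/msync() for DAX files.
 */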
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);
#endif