#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

struct iomap_ops;

/*
 * We use the lowest available bit in an exceptional entry for locking, one
 * bit for the entry size (PMD) and two more to tell us if the entry is a
 * huge zero page (HZP) or an empty entry that is just used for locking.
 * In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
 * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_HZP		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static inline unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}
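
/*
 * Illustrative sketch only, not part of the upstream API: shows how the two
 * helpers above compose and decompose an entry.  A locked, PMD-sized entry
 * built for @sector round-trips back to the same sector (assuming the sector
 * fits in the bits left above RADIX_DAX_SHIFT), and the PMD and lock bits
 * remain testable on the raw entry value.  The helper name is hypothetical.
 */
static inline bool dax_radix_entry_roundtrip_example(sector_t sector)
{
	void *entry = dax_radix_locked_entry(sector, RADIX_DAX_PMD);

	return dax_radix_sector(entry) == (unsigned long)sector &&
		((unsigned long)entry & RADIX_DAX_PMD) &&
		((unsigned long)entry & RADIX_DAX_ENTRY_LOCK);
}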

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, const struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all);
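
/*
 * Usage sketch (hypothetical names, not upstream code): a filesystem that
 * supports DAX typically forwards page faults on DAX files straight to
 * dax_iomap_fault() together with its own iomap_ops, roughly:
 *
 *	static int foo_dax_fault(struct vm_fault *vmf)
 *	{
 *		return dax_iomap_fault(vmf, &foo_iomap_ops);
 *	}
 *
 * where "foo_iomap_ops" stands in for the filesystem's iomap_ops instance;
 * real handlers also take the filesystem's own locks around the call.
 */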

#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length);
#else
static inline struct page *read_dax_sector(struct block_device *bdev,
		sector_t n)
{
	return ERR_PTR(-ENXIO);
}
static inline int __dax_zero_page_range(struct block_device *bdev,
		sector_t sector, unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
#endif

#ifdef CONFIG_FS_DAX_PMD
static inline unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}
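/*
 * Worked example (added here for illustration): with 4K pages on x86-64,
 * PMD_SHIFT is 21 and PAGE_SHIFT is 12, so a PMD entry has radix tree
 * order 9, i.e. it covers 512 pages (2MB).
 */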
int dax_iomap_pmd_fault(struct vm_fault *vmf, const struct iomap_ops *ops);
#else
static inline unsigned int dax_radix_order(void *entry)
{
	return 0;
}
static inline int dax_iomap_pmd_fault(struct vm_fault *vmf,
		const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif
int dax_pfn_mkwrite(struct vm_fault *vmf);

static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}

static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

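/*
 * Usage sketch (assumption, not stated in this header): filesystems are
 * expected to call dax_writeback_mapping_range() from their writeback path
 * (e.g. ->writepages) for DAX inodes, passing the block device backing the
 * mapping, so that dirty DAX radix tree entries in the range covered by the
 * writeback_control are flushed to media and marked clean.
 */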
struct writeback_control;
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);
#endif