dax: Remove i_mmap_lock protection
Currently faults are protected against truncate by the filesystem-specific
i_mmap_sem and by the page lock in the case of a hole page. CoW faults are
protected by DAX radix tree entry locking. So there's no need for
i_mmap_lock in the DAX code. Remove it.
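As a rough userspace sketch of that rule (illustrative only, not fs/dax.c
code: the per-entry mutex stands in for the DAX radix tree entry lock, the
two threads stand in for the fault and truncate paths, and every name below
is made up for the example):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_entry {
		pthread_mutex_t lock;	/* models the radix tree entry lock */
		bool mapped;		/* models an established pfn mapping */
	};

	static struct toy_entry entry = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.mapped = false,
	};

	/* Fault path: serializes on the entry itself, no mapping-wide lock. */
	static void *toy_fault(void *arg)
	{
		pthread_mutex_lock(&entry.lock);
		entry.mapped = true;	/* "vm_insert_mixed()" stand-in */
		pthread_mutex_unlock(&entry.lock);
		return NULL;
	}

	/* Truncate path: takes the same entry lock before tearing it down. */
	static void *toy_truncate(void *arg)
	{
		pthread_mutex_lock(&entry.lock);
		entry.mapped = false;	/* "unmap + delete entry" stand-in */
		pthread_mutex_unlock(&entry.lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t f, t;

		pthread_create(&f, NULL, toy_fault, NULL);
		pthread_create(&t, NULL, toy_truncate, NULL);
		pthread_join(f, NULL);
		pthread_join(t, NULL);
		printf("mapped = %d\n", entry.mapped);
		return 0;
	}

Because both paths serialize on the entry lock (build the sketch with e.g.
cc -pthread), dropping the mapping-wide i_mmap_lock does not open a window
in which truncate can tear down an entry that a fault is still installing.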
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
diff --git a/fs/dax.c b/fs/dax.c
index be74635..6dbe602 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -798,29 +798,19 @@
 		.sector = to_sector(bh, mapping->host),
 		.size = bh->b_size,
 	};
-	int error;
 	void *ret;
 	void *entry = *entryp;
 
-	i_mmap_lock_read(mapping);
-
-	if (dax_map_atomic(bdev, &dax) < 0) {
-		error = PTR_ERR(dax.addr);
-		goto out;
-	}
+	if (dax_map_atomic(bdev, &dax) < 0)
+		return PTR_ERR(dax.addr);
 	dax_unmap_atomic(bdev, &dax);
 
 	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
-	if (IS_ERR(ret)) {
-		error = PTR_ERR(ret);
-		goto out;
-	}
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
 	*entryp = ret;
 
-	error = vm_insert_mixed(vma, vaddr, dax.pfn);
- out:
-	i_mmap_unlock_read(mapping);
-	return error;
+	return vm_insert_mixed(vma, vaddr, dax.pfn);
 }
 
 /**
@@ -1058,8 +1048,6 @@
 		truncate_pagecache_range(inode, lstart, lend);
 	}
 
-	i_mmap_lock_read(mapping);
-
 	if (!write && !buffer_mapped(&bh)) {
 		spinlock_t *ptl;
 		pmd_t entry;
@@ -1148,8 +1136,6 @@
 	}
 
  out:
-	i_mmap_unlock_read(mapping);
-
 	return result;
 
  fallback: