NFS: Don't check request offset and size without holding a lock
Request offsets and sizes are not guaranteed to be stable unless you
are holding the request locked, so move the subrequest contiguity and
overlap sanity check until after nfs_lock_request() has succeeded on
the subrequest. The failure path must now call nfs_unlock_request()
to drop the newly acquired lock before unrolling.
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c940e61..84b6818 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -523,20 +523,6 @@ nfs_lock_and_join_requests(struct page *page)
total_bytes = head->wb_bytes;
for (subreq = head->wb_this_page; subreq != head;
subreq = subreq->wb_this_page) {
- /*
- * Subrequests are always contiguous, non overlapping
- * and in order - but may be repeated (mirrored writes).
- */
- if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
- /* keep track of how many bytes this group covers */
- total_bytes += subreq->wb_bytes;
- } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
- ((subreq->wb_offset + subreq->wb_bytes) >
- (head->wb_offset + total_bytes)))) {
- nfs_unroll_locks_and_wait(inode, head, subreq);
- return ERR_PTR(-EIO);
- }
-
if (!nfs_lock_request(subreq)) {
/* releases page group bit lock and
* inode spin lock and all references */
@@ -548,6 +534,20 @@ nfs_lock_and_join_requests(struct page *page)
return ERR_PTR(ret);
}
+ /*
+ * Subrequests are always contiguous, non overlapping
+ * and in order - but may be repeated (mirrored writes).
+ */
+ if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
+ /* keep track of how many bytes this group covers */
+ total_bytes += subreq->wb_bytes;
+ } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
+ ((subreq->wb_offset + subreq->wb_bytes) >
+ (head->wb_offset + total_bytes)))) {
+ nfs_unlock_request(subreq);
+ nfs_unroll_locks_and_wait(inode, head, subreq);
+ return ERR_PTR(-EIO);
+ }
}
/* Now that all requests are locked, make sure they aren't on any list.