/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

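/*
 * Submit the current bio, taking a reference on the parallel_io tracker
 * for its completion.  Always returns NULL so the caller can reset its
 * bio pointer in the same statement.
 */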
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

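/*
 * Allocate and initialize a bio for up to npg pages aimed at disk_sector
 * on the given device.  Under memory pressure (PF_MEMALLOC) retry with
 * progressively smaller page counts instead of failing outright.
 */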
static struct bio *
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
		bio_end_io_t end_io, struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

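/*
 * Add one page to a bio for the file-relative sector isect.  The sector is
 * translated through the extent to a volume offset and then through the
 * per-device map to a physical disk address; whenever the device mapping
 * changes or the bio is full, the current bio is submitted and a new one
 * started.  *len may be trimmed to what the device mapping allows.
 */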
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be, bio_end_io_t end_io,
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (disk_addr < map->start || disk_addr >= map->start + map->len) {
		if (!dev->map(dev, disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(rw, bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

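/* bio completion for reads: note the error in the pgio header, mark the
 * layout segment failed, and drop our reference on the parallel_io tracker.
 */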
static void bl_end_io_read(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;

	if (bio->bi_error) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}

	bio_put(bio);
	put_parallel(par);
}

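/* Work item that hands the completed read back to the generic pNFS code. */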
static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

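/*
 * Read the requested pages by walking the layout's extent tree one page
 * at a time: holes are zero-filled without touching the device, everything
 * else is queued on a bio.  Completion is reported through the parallel_io
 * tracker once the last bio finishes.
 */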
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset = header->args.pgbase, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(READ, bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_SIZE)
				pg_len = PAGE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_SIZE;
		}

		if (is_hole(&be)) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
		pg_offset = 0;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(READ, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

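/* bio completion for writes: note the error in the pgio header, mark the
 * layout segment failed, and drop our reference on the parallel_io tracker.
 */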
static void bl_end_io_write(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;
	struct nfs_pgio_header *header = par->data;

	if (bio->bi_error) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
		container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_SIZE - 1) & (loff_t)PAGE_MASK;

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT);
	}

	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

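/*
 * Write out the requested pages.  Writes always cover whole pages and are
 * issued against writable extents from the extent tree; the written ranges
 * are marked for LAYOUTCOMMIT in bl_write_cleanup once all bios complete.
 */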
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}

			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(WRITE, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

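/* Free a layout header, dropping any extents still held in its trees. */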
static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree(bl);
}

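/* Common layout header allocation for the block and SCSI layout types. */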
static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode,
		gfp_t gfp_flags, bool is_scsi_layout)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	bl->bl_scsi_layout = is_scsi_layout;
	return &bl->bl_layout;
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
		gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, false);
}

static struct pnfs_layout_hdr *sl_alloc_layout_hdr(struct inode *inode,
		gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, true);
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}

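/* Decode a 64-bit byte offset from XDR and convert it to a 512-byte sector
 * number, rejecting values that are not sector aligned.
 */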
static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}

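/*
 * Decode one extent from the layout XDR stream, look up its device, check
 * it against the layout ordering rules, and queue it on the staging list.
 */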
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	error = -EIO;
	be->be_device = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (!be->be_device)
		goto out_free_be;

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}

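/*
 * Decode a LAYOUTGET reply: extents are first collected on a staging list
 * and only inserted into the layout's extent trees after the whole reply
 * has been validated, so a partial decode can be unwound cleanly.
 */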
static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
		gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in temporary staging area
	 * until whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	if (status) {
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

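/* Remove the extents covered by a layout return from the extent trees,
 * ignoring ranges whose offset or length is not properly aligned.
 */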
static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

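/* Sanity check the server-provided block size: it must be reported and no
 * larger than PAGE_SIZE for this driver to handle I/O.
 */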
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}

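/*
 * Decide whether a request can be handled by this layout driver or must
 * fall back to the MDS: buffered I/O is accepted as-is, while direct I/O
 * must be aligned to @alignment (with an exception for writes that end
 * exactly at i_size).
 */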
static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment, bool is_write)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (is_write &&
	    (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (end != inode->i_mapping->nrpages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_SHIFT);
	else
		return (end - idx) << PAGE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE, true))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static struct pnfs_layoutdriver_type scsilayout_type = {
	.id				= LAYOUT_SCSI,
	.name				= "LAYOUT_SCSI",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= sl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = bl_init_pipefs();
	if (ret)
		goto out;

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out_cleanup_pipe;

	ret = pnfs_register_layoutdriver(&scsilayout_type);
	if (ret)
		goto out_unregister_block;
	return 0;

out_unregister_block:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out_cleanup_pipe:
	bl_cleanup_pipefs();
out:
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	pnfs_unregister_layoutdriver(&scsilayout_type);
	pnfs_unregister_layoutdriver(&blocklayout_type);
	bl_cleanup_pipefs();
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);