/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

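/*
 * Return true if reading this extent must be satisfied by zero-filling
 * rather than by touching the device: either the extent has no backing
 * data at all, or it is INVALID_DATA that has not been written yet
 * (be_tag clear).
 */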
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios. We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

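/*
 * Submit the bio built up so far, if any.  Each in-flight bio holds a
 * reference on the parallel_io tracker so completion of the whole request
 * can be detected.  Always returns NULL so the caller can start a new bio.
 */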
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

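/*
 * Allocate a bio for up to @npg pages aimed at @disk_sector.  Under memory
 * pressure (PF_MEMALLOC) retry with progressively smaller bios instead of
 * failing outright.
 */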
static struct bio *
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
		bio_end_io_t end_io, struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

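/*
 * Map a file-relative sector through the extent and the cached device
 * mapping to a physical disk address and add the page to the current bio.
 * If the page lies outside the current device mapping, or the bio is full,
 * the bio is submitted and a new one is started.  *len is trimmed so the
 * I/O never crosses the end of the mapping.
 */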
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be, bio_end_io_t end_io,
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (disk_addr < map->start || disk_addr >= map->start + map->len) {
		if (!dev->map(dev, disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(rw, bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

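/*
 * Per-bio read completion: on error, record it in the pNFS header and mark
 * the layout segment failed so the read is retried through the MDS.
 */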
static void bl_end_io_read(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;

	if (bio->bi_error) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}

	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

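/*
 * Read a list of pages by walking the layout's extent tree.  Holes are
 * zero-filled without touching the device; everything else is issued as
 * bios against the mapped block device.
 */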
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(READ, bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_offset = f_offset & ~PAGE_CACHE_MASK;
		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_CACHE_SIZE;
		}

		isect += (pg_offset >> SECTOR_SHIFT);
		extent_length -= (pg_offset >> SECTOR_SHIFT);

		if (is_hole(&be)) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(READ, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

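/*
 * Per-bio write completion: on error, record it in the pNFS header and
 * mark the layout segment failed so the write is redone through the MDS.
 */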
static void bl_end_io_write(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;
	struct nfs_pgio_header *header = par->data;

	if (bio->bi_error) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
		container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT);
	}

	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

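/*
 * Write a list of pages.  Whole pages are always written, so the request
 * is first rounded down to a page boundary; successfully written ranges
 * are marked written in the extent tree by bl_write_cleanup().
 */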
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_CACHE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}

			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_CACHE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(WRITE, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

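/* Free a layout header, dropping any extents still held in its trees. */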
static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}

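/*
 * Decode a 64-bit byte value from the XDR stream and convert it to a
 * 512-byte sector count; values that are not sector aligned are rejected.
 */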
static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}

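/*
 * Decode a single extent from the layout XDR stream: device ID, file
 * offset, length, volume offset and state.  The referenced device is
 * looked up, the extent is checked against the layout verification rules,
 * and on success it is queued on @extents for later insertion.
 */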
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	error = -EIO;
	be->be_device = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (!be->be_device)
		goto out_free_be;

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}

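/*
 * Allocate a layout segment by decoding the LAYOUTGET reply.  Extents are
 * decoded into a temporary list first; only a fully verified layout is
 * merged into the extent trees.
 */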
static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
		gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in temporary staging area
	 * until whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	if (status) {
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

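/*
 * Drop the extents covered by a layoutreturn from the layout's extent
 * trees.  Ranges that are not block-size aligned are ignored.
 */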
static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

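/*
 * Validate the server's block size at mount time; only block sizes up to
 * PAGE_SIZE are supported by this driver.
 */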
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}

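/*
 * Direct I/O requests must be sector aligned for reads and page aligned
 * for writes to be handled here; anything else falls back to the MDS.
 * Buffered I/O is always accepted, as higher layers handle alignment.
 */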
static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode)) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != inode->i_mapping->nrpages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;
	ret = bl_init_pipefs();
	if (ret)
		goto out_unregister;
	return 0;

out_unregister:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out:
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	bl_cleanup_pipefs();
	pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);