1/*
2 * Module for pnfs flexfile layout driver.
3 *
4 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
5 *
6 * Tao Peng <bergwolf@primarydata.com>
7 */
8
9#include <linux/nfs_fs.h>
10#include <linux/nfs_page.h>
11#include <linux/module.h>
12
13#include <linux/sunrpc/metrics.h>
14
15#include "flexfilelayout.h"
16#include "../nfs4session.h"
17#include "../nfs4idmap.h"
18#include "../internal.h"
19#include "../delegation.h"
20#include "../nfs4trace.h"
21#include "../iostat.h"
22#include "../nfs.h"
23
24#define NFSDBG_FACILITY NFSDBG_PNFS_LD
25
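/*
 * Delay (in jiffies, i.e. 15 seconds) applied before retrying an RPC
 * that a server answered with NFS4ERR_DELAY or NFS4ERR_GRACE; see
 * ff_layout_async_handle_error_v4() below.
 */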
26#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
27
28static struct pnfs_layout_hdr *
29ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
30{
31 struct nfs4_flexfile_layout *ffl;
32
33 ffl = kzalloc(sizeof(*ffl), gfp_flags);
34 if (ffl) {
35 INIT_LIST_HEAD(&ffl->error_list);
36 return &ffl->generic_hdr;
37 } else
38 return NULL;
39}
40
41static void
42ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
43{
44 struct nfs4_ff_layout_ds_err *err, *n;
45
46 list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
47 list) {
48 list_del(&err->list);
49 kfree(err);
50 }
51 kfree(FF_LAYOUT_FROM_HDR(lo));
52}
53
54static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
55{
56 __be32 *p;
57
58 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
59 if (unlikely(p == NULL))
60 return -ENOBUFS;
61 memcpy(stateid, p, NFS4_STATEID_SIZE);
62 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
63 p[0], p[1], p[2], p[3]);
64 return 0;
65}
66
67static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
68{
69 __be32 *p;
70
71 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
72 if (unlikely(!p))
73 return -ENOBUFS;
74 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
75 nfs4_print_deviceid(devid);
76 return 0;
77}
78
79static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
80{
81 __be32 *p;
82
83 p = xdr_inline_decode(xdr, 4);
84 if (unlikely(!p))
85 return -ENOBUFS;
86 fh->size = be32_to_cpup(p++);
87 if (fh->size > sizeof(struct nfs_fh)) {
88 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
89 fh->size);
90 return -EOVERFLOW;
91 }
92 /* fh.data */
93 p = xdr_inline_decode(xdr, fh->size);
94 if (unlikely(!p))
95 return -ENOBUFS;
96 memcpy(&fh->data, p, fh->size);
97 dprintk("%s: fh len %d\n", __func__, fh->size);
98
99 return 0;
100}
101
102/*
103 * Currently only stringified uids and gids are accepted.
104 * I.e., Kerberos is not supported to the DSes, so no principals.
105 *
106 * That means that one common function will suffice, but when
107 * principals are added, this should be split to accommodate
108 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
109 */
110static int
111decode_name(struct xdr_stream *xdr, u32 *id)
112{
113 __be32 *p;
114 int len;
115
116 /* opaque_length(4)*/
117 p = xdr_inline_decode(xdr, 4);
118 if (unlikely(!p))
119 return -ENOBUFS;
120 len = be32_to_cpup(p++);
121 if (len < 0)
122 return -EINVAL;
123
124 dprintk("%s: len %u\n", __func__, len);
125
126 /* opaque body */
127 p = xdr_inline_decode(xdr, len);
128 if (unlikely(!p))
129 return -ENOBUFS;
130
131 if (!nfs_map_string_to_numeric((char *)p, len, id))
132 return -EINVAL;
133
134 return 0;
135}
136
137static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
138{
139 int i;
140
141 if (fls->mirror_array) {
142 for (i = 0; i < fls->mirror_array_cnt; i++) {
143			/* Normally mirror_ds is freed in
144			 * .free_deviceid_node, but we still do it here
145			 * for the .alloc_lseg error path. */
146 if (fls->mirror_array[i]) {
147 kfree(fls->mirror_array[i]->fh_versions);
148 nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
149 kfree(fls->mirror_array[i]);
150 }
151 }
152 kfree(fls->mirror_array);
153 fls->mirror_array = NULL;
154 }
155}
156
157static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
158{
159 int ret = 0;
160
161 dprintk("--> %s\n", __func__);
162
163 /* FIXME: remove this check when layout segment support is added */
164 if (lgr->range.offset != 0 ||
165 lgr->range.length != NFS4_MAX_UINT64) {
166 dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
167 __func__);
168 ret = -EINVAL;
169 }
170
171 dprintk("--> %s returns %d\n", __func__, ret);
172 return ret;
173}
174
175static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
176{
177 if (fls) {
178 ff_layout_free_mirror_array(fls);
179 kfree(fls);
180 }
181}
182
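/*
 * Sort the mirror array in place by descending efficiency (a simple
 * O(n^2) swap pass).  The read path walks the array from index 0, so it
 * prefers the most efficient DS that can actually be prepared.
 */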
183static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
184{
185	int i, j;
186
187 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
188 for (j = i + 1; j < fls->mirror_array_cnt; j++)
189 if (fls->mirror_array[i]->efficiency <
190			    fls->mirror_array[j]->efficiency)
191 swap(fls->mirror_array[i],
192 fls->mirror_array[j]);
193	}
194}
195
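/*
 * Decode a LAYOUTGET reply into an nfs4_ff_layout_segment.  The opaque
 * body follows the flex files layout XDR (later published as RFC 8435):
 * stripe unit and mirror count, then per mirror a DS count, deviceid,
 * efficiency, stateid, an array of file handles, and stringified
 * uid/gid.  Only single-DS mirrors and whole-file layouts are accepted
 * for now.
 */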
196static struct pnfs_layout_segment *
197ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
198 struct nfs4_layoutget_res *lgr,
199 gfp_t gfp_flags)
200{
201 struct pnfs_layout_segment *ret;
202 struct nfs4_ff_layout_segment *fls = NULL;
203 struct xdr_stream stream;
204 struct xdr_buf buf;
205 struct page *scratch;
206 u64 stripe_unit;
207 u32 mirror_array_cnt;
208 __be32 *p;
209 int i, rc;
210
211 dprintk("--> %s\n", __func__);
212 scratch = alloc_page(gfp_flags);
213 if (!scratch)
214 return ERR_PTR(-ENOMEM);
215
216 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
217 lgr->layoutp->len);
218 xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
219
220 /* stripe unit and mirror_array_cnt */
221 rc = -EIO;
222 p = xdr_inline_decode(&stream, 8 + 4);
223 if (!p)
224 goto out_err_free;
225
226 p = xdr_decode_hyper(p, &stripe_unit);
227 mirror_array_cnt = be32_to_cpup(p++);
228 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
229 stripe_unit, mirror_array_cnt);
230
231 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
232 mirror_array_cnt == 0)
233 goto out_err_free;
234
235 rc = -ENOMEM;
236 fls = kzalloc(sizeof(*fls), gfp_flags);
237 if (!fls)
238 goto out_err_free;
239
240 fls->mirror_array_cnt = mirror_array_cnt;
241 fls->stripe_unit = stripe_unit;
242 fls->mirror_array = kcalloc(fls->mirror_array_cnt,
243 sizeof(fls->mirror_array[0]), gfp_flags);
244 if (fls->mirror_array == NULL)
245 goto out_err_free;
246
247 for (i = 0; i < fls->mirror_array_cnt; i++) {
248 struct nfs4_deviceid devid;
249 struct nfs4_deviceid_node *idnode;
250 u32 ds_count;
251 u32 fh_count;
252 int j;
253
254 rc = -EIO;
255 p = xdr_inline_decode(&stream, 4);
256 if (!p)
257 goto out_err_free;
258 ds_count = be32_to_cpup(p);
259
260 /* FIXME: allow for striping? */
261 if (ds_count != 1)
262 goto out_err_free;
263
264 fls->mirror_array[i] =
265 kzalloc(sizeof(struct nfs4_ff_layout_mirror),
266 gfp_flags);
267 if (fls->mirror_array[i] == NULL) {
268 rc = -ENOMEM;
269 goto out_err_free;
270 }
271
272 spin_lock_init(&fls->mirror_array[i]->lock);
273 fls->mirror_array[i]->ds_count = ds_count;
274
275 /* deviceid */
276 rc = decode_deviceid(&stream, &devid);
277 if (rc)
278 goto out_err_free;
279
280 idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
281 &devid, lh->plh_lc_cred,
282 gfp_flags);
283 /*
284		 * On success, mirror_ds was allocated either by a previous
285		 * GETDEVICEINFO or newly by .alloc_deviceid_node; an
286		 * nfs4_find_get_deviceid() failure means GETDEVICEINFO failed.
287 */
288 if (idnode)
289 fls->mirror_array[i]->mirror_ds =
290 FF_LAYOUT_MIRROR_DS(idnode);
291 else
292 goto out_err_free;
293
294 /* efficiency */
295 rc = -EIO;
296 p = xdr_inline_decode(&stream, 4);
297 if (!p)
298 goto out_err_free;
299 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
300
301 /* stateid */
302 rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
303 if (rc)
304 goto out_err_free;
305
306 /* fh */
307 p = xdr_inline_decode(&stream, 4);
308 if (!p)
309 goto out_err_free;
310 fh_count = be32_to_cpup(p);
311
312 fls->mirror_array[i]->fh_versions =
313 kzalloc(fh_count * sizeof(struct nfs_fh),
314 gfp_flags);
315 if (fls->mirror_array[i]->fh_versions == NULL) {
316 rc = -ENOMEM;
317 goto out_err_free;
318 }
319
320 for (j = 0; j < fh_count; j++) {
321 rc = decode_nfs_fh(&stream,
322 &fls->mirror_array[i]->fh_versions[j]);
323 if (rc)
324 goto out_err_free;
325 }
326
327 fls->mirror_array[i]->fh_versions_cnt = fh_count;
328
329 /* user */
330 rc = decode_name(&stream, &fls->mirror_array[i]->uid);
331 if (rc)
332 goto out_err_free;
333
334 /* group */
335 rc = decode_name(&stream, &fls->mirror_array[i]->gid);
336 if (rc)
337 goto out_err_free;
338
339 dprintk("%s: uid %d gid %d\n", __func__,
340 fls->mirror_array[i]->uid,
341 fls->mirror_array[i]->gid);
342 }
343
344 ff_layout_sort_mirrors(fls);
345 rc = ff_layout_check_layout(lgr);
346 if (rc)
347 goto out_err_free;
348
349 ret = &fls->generic_hdr;
350 dprintk("<-- %s (success)\n", __func__);
351out_free_page:
352 __free_page(scratch);
353 return ret;
354out_err_free:
355 _ff_layout_free_lseg(fls);
356 ret = ERR_PTR(rc);
357 dprintk("<-- %s (%d)\n", __func__, rc);
358 goto out_free_page;
359}
360
361static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
362{
363 struct pnfs_layout_segment *lseg;
364
365 list_for_each_entry(lseg, &layout->plh_segs, pls_list)
366 if (lseg->pls_range.iomode == IOMODE_RW)
367 return true;
368
369 return false;
370}
371
372static void
373ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
374{
375 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
376 int i;
377
378 dprintk("--> %s\n", __func__);
379
380 for (i = 0; i < fls->mirror_array_cnt; i++) {
381 if (fls->mirror_array[i]) {
382 nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
383 fls->mirror_array[i]->mirror_ds = NULL;
384 if (fls->mirror_array[i]->cred) {
385 put_rpccred(fls->mirror_array[i]->cred);
386 fls->mirror_array[i]->cred = NULL;
387 }
388 }
389 }
390
391 if (lseg->pls_range.iomode == IOMODE_RW) {
392 struct nfs4_flexfile_layout *ffl;
393 struct inode *inode;
394
395 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
396 inode = ffl->generic_hdr.plh_inode;
397 spin_lock(&inode->i_lock);
398 if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
399 ffl->commit_info.nbuckets = 0;
400 kfree(ffl->commit_info.buckets);
401 ffl->commit_info.buckets = NULL;
402 }
403 spin_unlock(&inode->i_lock);
404 }
405 _ff_layout_free_lseg(fls);
406}
407
408/* Return 1 until we have multiple lsegs support */
409static int
410ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
411{
412 return 1;
413}
414
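/*
 * Lazily allocate the commit buckets for this layout: one bucket per
 * mirror per lseg (currently a single RW lseg per file).  The buckets
 * are installed under cinfo->lock, so a racing allocation is simply
 * freed.
 */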
415static int
416ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
417 struct nfs_commit_info *cinfo,
418 gfp_t gfp_flags)
419{
420 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
421 struct pnfs_commit_bucket *buckets;
422 int size;
423
424 if (cinfo->ds->nbuckets != 0) {
425 /* This assumes there is only one RW lseg per file.
426 * To support multiple lseg per file, we need to
427 * change struct pnfs_commit_bucket to allow dynamic
428 * increasing nbuckets.
429 */
430 return 0;
431 }
432
433 size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
434
435 buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
436 gfp_flags);
437 if (!buckets)
438 return -ENOMEM;
439 else {
440 int i;
441
442 spin_lock(cinfo->lock);
443 if (cinfo->ds->nbuckets != 0)
444 kfree(buckets);
445 else {
446 cinfo->ds->buckets = buckets;
447 cinfo->ds->nbuckets = size;
448 for (i = 0; i < size; i++) {
449 INIT_LIST_HEAD(&buckets[i].written);
450 INIT_LIST_HEAD(&buckets[i].committing);
451 /* mark direct verifier as unset */
452 buckets[i].direct_verf.committed =
453 NFS_INVALID_STABLE_HOW;
454 }
455 }
456 spin_unlock(cinfo->lock);
457 return 0;
458 }
459}
460
461static struct nfs4_pnfs_ds *
462ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
463 int *best_idx)
464{
465 struct nfs4_ff_layout_segment *fls;
466 struct nfs4_pnfs_ds *ds;
467 int idx;
468
469 fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
470 /* mirrors are sorted by efficiency */
471 for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
472 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
473 if (ds) {
474 *best_idx = idx;
475 return ds;
476 }
477 }
478
479 return NULL;
480}
481
482static void
483ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
484 struct nfs_page *req)
485{
486 struct nfs_pgio_mirror *pgm;
487 struct nfs4_ff_layout_mirror *mirror;
488 struct nfs4_pnfs_ds *ds;
489 int ds_idx;
490
491 /* Use full layout for now */
492 if (!pgio->pg_lseg)
493 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
494 req->wb_context,
495 0,
496 NFS4_MAX_UINT64,
497 IOMODE_READ,
498 GFP_KERNEL);
499 /* If no lseg, fall back to read through mds */
500 if (pgio->pg_lseg == NULL)
501 goto out_mds;
502
503 ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
504 if (!ds)
505 goto out_mds;
506 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
507
508 pgio->pg_mirror_idx = ds_idx;
509
510 /* read always uses only one mirror - idx 0 for pgio layer */
511 pgm = &pgio->pg_mirrors[0];
512 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
513
514 return;
515out_mds:
516 pnfs_put_lseg(pgio->pg_lseg);
517 pgio->pg_lseg = NULL;
518 nfs_pageio_reset_read_mds(pgio);
519}
520
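/*
 * Set up the write path: obtain a whole-file RW layout, allocate the
 * commit buckets, and map each layout mirror 1:1 onto a pgio mirror
 * using that DS's preferred wsize.  Any failure falls back to writing
 * through the MDS.
 */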
521static void
522ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
523 struct nfs_page *req)
524{
525 struct nfs4_ff_layout_mirror *mirror;
526 struct nfs_pgio_mirror *pgm;
527 struct nfs_commit_info cinfo;
528 struct nfs4_pnfs_ds *ds;
529 int i;
530 int status;
531
532 if (!pgio->pg_lseg)
533 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
534 req->wb_context,
535 0,
536 NFS4_MAX_UINT64,
537 IOMODE_RW,
538 GFP_NOFS);
539 /* If no lseg, fall back to write through mds */
540 if (pgio->pg_lseg == NULL)
541 goto out_mds;
542
543 nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
544 status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
545 if (status < 0)
546 goto out_mds;
547
548 /* Use a direct mapping of ds_idx to pgio mirror_idx */
549 if (WARN_ON_ONCE(pgio->pg_mirror_count !=
550 FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
551 goto out_mds;
552
553 for (i = 0; i < pgio->pg_mirror_count; i++) {
554 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
555 if (!ds)
556 goto out_mds;
557 pgm = &pgio->pg_mirrors[i];
558 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
559 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
560 }
561
562 return;
563
564out_mds:
565 pnfs_put_lseg(pgio->pg_lseg);
566 pgio->pg_lseg = NULL;
567 nfs_pageio_reset_write_mds(pgio);
568}
569
570static unsigned int
571ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
572 struct nfs_page *req)
573{
574 if (!pgio->pg_lseg)
575 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
576 req->wb_context,
577 0,
578 NFS4_MAX_UINT64,
579 IOMODE_RW,
580 GFP_NOFS);
581 if (pgio->pg_lseg)
582 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
583
584 /* no lseg means that pnfs is not in use, so no mirroring here */
585 pnfs_put_lseg(pgio->pg_lseg);
586 pgio->pg_lseg = NULL;
587 nfs_pageio_reset_write_mds(pgio);
588 return 1;
589}
590
591static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
592 .pg_init = ff_layout_pg_init_read,
593 .pg_test = pnfs_generic_pg_test,
594 .pg_doio = pnfs_generic_pg_readpages,
595 .pg_cleanup = pnfs_generic_pg_cleanup,
596};
597
598static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
599 .pg_init = ff_layout_pg_init_write,
600 .pg_test = pnfs_generic_pg_test,
601 .pg_doio = pnfs_generic_pg_writepages,
602 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
603 .pg_cleanup = pnfs_generic_pg_cleanup,
604};
605
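/*
 * Requeue a failed WRITE.  With retry_pnfs the pages (or the direct-I/O
 * request) are marked for resending through pNFS; otherwise the header
 * is resent through the MDS.  A layoutcommit is scheduled first in
 * either case.
 */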
606static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
607{
608 struct rpc_task *task = &hdr->task;
609
610 pnfs_layoutcommit_inode(hdr->inode, false);
611
612 if (retry_pnfs) {
613 dprintk("%s Reset task %5u for i/o through pNFS "
614 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
615 hdr->task.tk_pid,
616 hdr->inode->i_sb->s_id,
617 (unsigned long long)NFS_FILEID(hdr->inode),
618 hdr->args.count,
619 (unsigned long long)hdr->args.offset);
620
621 if (!hdr->dreq) {
622 struct nfs_open_context *ctx;
623
624 ctx = nfs_list_entry(hdr->pages.next)->wb_context;
625 set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
626 hdr->completion_ops->error_cleanup(&hdr->pages);
627 } else {
628 nfs_direct_set_resched_writes(hdr->dreq);
629 /* fake unstable write to let common nfs resend pages */
630 hdr->verf.committed = NFS_UNSTABLE;
631 hdr->good_bytes = 0;
632 }
633 return;
634 }
635
636 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
637 dprintk("%s Reset task %5u for i/o through MDS "
638 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
639 hdr->task.tk_pid,
640 hdr->inode->i_sb->s_id,
641 (unsigned long long)NFS_FILEID(hdr->inode),
642 hdr->args.count,
643 (unsigned long long)hdr->args.offset);
644
645 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
646 }
647}
648
649static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
650{
651 struct rpc_task *task = &hdr->task;
652
653 pnfs_layoutcommit_inode(hdr->inode, false);
654
655 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
656 dprintk("%s Reset task %5u for i/o through MDS "
657 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
658 hdr->task.tk_pid,
659 hdr->inode->i_sb->s_id,
660 (unsigned long long)NFS_FILEID(hdr->inode),
661 hdr->args.count,
662 (unsigned long long)hdr->args.offset);
663
664 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
665 }
666}
667
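/*
 * Map an NFSv4 error from a DS into a recovery action.  Returns
 * -NFS4ERR_RESET_TO_PNFS to retry via another DS, -NFS4ERR_RESET_TO_MDS
 * to resend through the MDS, -EAGAIN to retry the same RPC (after state
 * recovery or a delay), and 0 when there is nothing to handle or the
 * stateid is irrecoverably bad (tk_status is then set to -EIO).
 */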
668static int ff_layout_async_handle_error_v4(struct rpc_task *task,
669 struct nfs4_state *state,
670 struct nfs_client *clp,
671 struct pnfs_layout_segment *lseg,
672 int idx)
673{
674 struct pnfs_layout_hdr *lo = lseg->pls_layout;
675 struct inode *inode = lo->plh_inode;
676 struct nfs_server *mds_server = NFS_SERVER(inode);
677
678 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
679 struct nfs_client *mds_client = mds_server->nfs_client;
680 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
681
682 if (task->tk_status >= 0)
683 return 0;
684
685 switch (task->tk_status) {
686 /* MDS state errors */
687 case -NFS4ERR_DELEG_REVOKED:
688 case -NFS4ERR_ADMIN_REVOKED:
689 case -NFS4ERR_BAD_STATEID:
690 if (state == NULL)
691 break;
692 nfs_remove_bad_delegation(state->inode);
693 case -NFS4ERR_OPENMODE:
694 if (state == NULL)
695 break;
696 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
697 goto out_bad_stateid;
698 goto wait_on_recovery;
699 case -NFS4ERR_EXPIRED:
700 if (state != NULL) {
701 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
702 goto out_bad_stateid;
703 }
704 nfs4_schedule_lease_recovery(mds_client);
705 goto wait_on_recovery;
706 /* DS session errors */
707 case -NFS4ERR_BADSESSION:
708 case -NFS4ERR_BADSLOT:
709 case -NFS4ERR_BAD_HIGH_SLOT:
710 case -NFS4ERR_DEADSESSION:
711 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
712 case -NFS4ERR_SEQ_FALSE_RETRY:
713 case -NFS4ERR_SEQ_MISORDERED:
714 dprintk("%s ERROR %d, Reset session. Exchangeid "
715 "flags 0x%x\n", __func__, task->tk_status,
716 clp->cl_exchange_flags);
717 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
718 break;
719 case -NFS4ERR_DELAY:
720 case -NFS4ERR_GRACE:
721 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
722 break;
723 case -NFS4ERR_RETRY_UNCACHED_REP:
724 break;
725 /* Invalidate Layout errors */
726 case -NFS4ERR_PNFS_NO_LAYOUT:
727 case -ESTALE: /* mapped NFS4ERR_STALE */
728 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
729 case -EISDIR: /* mapped NFS4ERR_ISDIR */
730 case -NFS4ERR_FHEXPIRED:
731 case -NFS4ERR_WRONG_TYPE:
732 dprintk("%s Invalid layout error %d\n", __func__,
733 task->tk_status);
734 /*
735 * Destroy layout so new i/o will get a new layout.
736 * Layout will not be destroyed until all current lseg
737 * references are put. Mark layout as invalid to resend failed
738 * i/o and all i/o waiting on the slot table to the MDS until
739 * layout is destroyed and a new valid layout is obtained.
740 */
741 pnfs_destroy_layout(NFS_I(inode));
742 rpc_wake_up(&tbl->slot_tbl_waitq);
743 goto reset;
744 /* RPC connection errors */
745 case -ECONNREFUSED:
746 case -EHOSTDOWN:
747 case -EHOSTUNREACH:
748 case -ENETUNREACH:
749 case -EIO:
750 case -ETIMEDOUT:
751 case -EPIPE:
752 dprintk("%s DS connection error %d\n", __func__,
753 task->tk_status);
754 nfs4_mark_deviceid_unavailable(devid);
755 rpc_wake_up(&tbl->slot_tbl_waitq);
756 /* fall through */
757 default:
758 if (ff_layout_has_available_ds(lseg))
759 return -NFS4ERR_RESET_TO_PNFS;
760reset:
761 dprintk("%s Retry through MDS. Error %d\n", __func__,
762 task->tk_status);
763 return -NFS4ERR_RESET_TO_MDS;
764 }
765out:
766 task->tk_status = 0;
767 return -EAGAIN;
768out_bad_stateid:
769 task->tk_status = -EIO;
770 return 0;
771wait_on_recovery:
772 rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
773 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
774 rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
775 goto out;
776}
777
778/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
779static int ff_layout_async_handle_error_v3(struct rpc_task *task,
780 struct pnfs_layout_segment *lseg,
781 int idx)
782{
783 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
784
785 if (task->tk_status >= 0)
786 return 0;
787
788 if (task->tk_status != -EJUKEBOX) {
789 dprintk("%s DS connection error %d\n", __func__,
790 task->tk_status);
791 nfs4_mark_deviceid_unavailable(devid);
792 if (ff_layout_has_available_ds(lseg))
793 return -NFS4ERR_RESET_TO_PNFS;
794 else
795 return -NFS4ERR_RESET_TO_MDS;
796 }
797
798 if (task->tk_status == -EJUKEBOX)
799 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
800 task->tk_status = 0;
801 rpc_restart_call(task);
802 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
803 return -EAGAIN;
804}
805
806static int ff_layout_async_handle_error(struct rpc_task *task,
807 struct nfs4_state *state,
808 struct nfs_client *clp,
809 struct pnfs_layout_segment *lseg,
810 int idx)
811{
812 int vers = clp->cl_nfs_mod->rpc_vers->number;
813
814 switch (vers) {
815 case 3:
816 return ff_layout_async_handle_error_v3(task, lseg, idx);
817 case 4:
818 return ff_layout_async_handle_error_v4(task, state, clp,
819 lseg, idx);
820 default:
821 /* should never happen */
822 WARN_ON_ONCE(1);
823 return 0;
824 }
825}
826
827static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
828 int idx, u64 offset, u64 length,
829 u32 status, int opnum)
830{
831 struct nfs4_ff_layout_mirror *mirror;
832 int err;
833
834 mirror = FF_LAYOUT_COMP(lseg, idx);
835 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
836 mirror, offset, length, status, opnum,
837 GFP_NOIO);
838 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
839}
840
841/* NFS_PROTO call done callback routines */
842
843static int ff_layout_read_done_cb(struct rpc_task *task,
844 struct nfs_pgio_header *hdr)
845{
846 struct inode *inode;
847 int err;
848
849 trace_nfs4_pnfs_read(hdr, task->tk_status);
850 if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
851 hdr->res.op_status = NFS4ERR_NXIO;
852 if (task->tk_status < 0 && hdr->res.op_status)
853 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
854 hdr->args.offset, hdr->args.count,
855 hdr->res.op_status, OP_READ);
856 err = ff_layout_async_handle_error(task, hdr->args.context->state,
857 hdr->ds_clp, hdr->lseg,
858 hdr->pgio_mirror_idx);
859
860 switch (err) {
861 case -NFS4ERR_RESET_TO_PNFS:
862 set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
863 &hdr->lseg->pls_layout->plh_flags);
864 pnfs_read_resend_pnfs(hdr);
865 return task->tk_status;
866 case -NFS4ERR_RESET_TO_MDS:
867 inode = hdr->lseg->pls_layout->plh_inode;
868 pnfs_error_mark_layout_for_return(inode, hdr->lseg);
869 ff_layout_reset_read(hdr);
870 return task->tk_status;
871 case -EAGAIN:
872 rpc_restart_call_prepare(task);
873 return -EAGAIN;
874 }
875
876 return 0;
877}
878
879/*
880 * We reference the rpc_cred of the first WRITE that triggers the need for
881 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
882 * rfc5661 is not clear about which credential should be used.
883 *
884 * The flexfile layout client should treat a FILE_SYNC reply from a DS as
885 * DATA_SYNC; thus, per http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751,
886 * we always send a layoutcommit after DS writes.
887 */
888static void
889ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
890{
891	pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
892 hdr->mds_offset + hdr->res.count);
893	dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
894 (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
895}
896
897static bool
898ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
899{
900 /* No mirroring for now */
901 struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
902
903 return ff_layout_test_devid_unavailable(node);
904}
905
906static int ff_layout_read_prepare_common(struct rpc_task *task,
907 struct nfs_pgio_header *hdr)
908{
909 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
910 rpc_exit(task, -EIO);
911 return -EIO;
912 }
913 if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
914 dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
915 if (ff_layout_has_available_ds(hdr->lseg))
916 pnfs_read_resend_pnfs(hdr);
917 else
918 ff_layout_reset_read(hdr);
919 rpc_exit(task, 0);
920 return -EAGAIN;
921 }
922 hdr->pgio_done_cb = ff_layout_read_done_cb;
923
924 return 0;
925}
926
927/*
928 * Call ops for the async read/write cases
929 * In the case of dense layouts, the offset needs to be reset to its
930 * original value.
931 */
932static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
933{
934 struct nfs_pgio_header *hdr = data;
935
936 if (ff_layout_read_prepare_common(task, hdr))
937 return;
938
939 rpc_call_start(task);
940}
941
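/*
 * A DS may speak NFSv4.1 or NFSv4.0: use the session slot table when a
 * session exists, otherwise fall back to the v4.0 slot table.
 */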
942static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
943 struct nfs4_sequence_args *args,
944 struct nfs4_sequence_res *res,
945 struct rpc_task *task)
946{
947 if (ds_clp->cl_session)
948 return nfs41_setup_sequence(ds_clp->cl_session,
949 args,
950 res,
951 task);
952 return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
953 args,
954 res,
955 task);
956}
957
958static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
959{
960 struct nfs_pgio_header *hdr = data;
961
962 if (ff_layout_read_prepare_common(task, hdr))
963 return;
964
965 if (ff_layout_setup_sequence(hdr->ds_clp,
966 &hdr->args.seq_args,
967 &hdr->res.seq_res,
968 task))
969 return;
970
971 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
972 hdr->args.lock_context, FMODE_READ) == -EIO)
973 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
974}
975
976static void ff_layout_read_call_done(struct rpc_task *task, void *data)
977{
978 struct nfs_pgio_header *hdr = data;
979
980 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
981
982 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
983 task->tk_status == 0) {
984 nfs4_sequence_done(task, &hdr->res.seq_res);
985 return;
986 }
987
988 /* Note this may cause RPC to be resent */
989 hdr->mds_ops->rpc_call_done(task, hdr);
990}
991
992static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
993{
994 struct nfs_pgio_header *hdr = data;
995
996 rpc_count_iostats_metrics(task,
997 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
998}
999
1000static int ff_layout_write_done_cb(struct rpc_task *task,
1001 struct nfs_pgio_header *hdr)
1002{
1003 struct inode *inode;
1004 int err;
1005
1006 trace_nfs4_pnfs_write(hdr, task->tk_status);
1007 if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
1008 hdr->res.op_status = NFS4ERR_NXIO;
1009 if (task->tk_status < 0 && hdr->res.op_status)
1010 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1011 hdr->args.offset, hdr->args.count,
1012 hdr->res.op_status, OP_WRITE);
1013 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1014 hdr->ds_clp, hdr->lseg,
1015 hdr->pgio_mirror_idx);
1016
1017 switch (err) {
1018 case -NFS4ERR_RESET_TO_PNFS:
1019 case -NFS4ERR_RESET_TO_MDS:
1020 inode = hdr->lseg->pls_layout->plh_inode;
1021 pnfs_error_mark_layout_for_return(inode, hdr->lseg);
1022 if (err == -NFS4ERR_RESET_TO_PNFS) {
1023 pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
1024 ff_layout_reset_write(hdr, true);
1025 } else {
1026 pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
1027 ff_layout_reset_write(hdr, false);
1028 }
1029 return task->tk_status;
1030 case -EAGAIN:
1031 rpc_restart_call_prepare(task);
1032 return -EAGAIN;
1033 }
1034
1035 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1036 hdr->res.verf->committed == NFS_DATA_SYNC)
1037 ff_layout_set_layoutcommit(hdr);
1038
1039 return 0;
1040}
1041
1042static int ff_layout_commit_done_cb(struct rpc_task *task,
1043 struct nfs_commit_data *data)
1044{
1045 struct inode *inode;
1046 int err;
1047
1048 trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1049 if (task->tk_status == -ETIMEDOUT && !data->res.op_status)
1050 data->res.op_status = NFS4ERR_NXIO;
1051 if (task->tk_status < 0 && data->res.op_status)
1052 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1053 data->args.offset, data->args.count,
1054 data->res.op_status, OP_COMMIT);
1055 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1056 data->lseg, data->ds_commit_index);
1057
1058 switch (err) {
1059 case -NFS4ERR_RESET_TO_PNFS:
1060 case -NFS4ERR_RESET_TO_MDS:
1061 inode = data->lseg->pls_layout->plh_inode;
1062 pnfs_error_mark_layout_for_return(inode, data->lseg);
1063 if (err == -NFS4ERR_RESET_TO_PNFS)
1064 pnfs_set_retry_layoutget(data->lseg->pls_layout);
1065 else
1066 pnfs_clear_retry_layoutget(data->lseg->pls_layout);
1067 pnfs_generic_prepare_to_resend_writes(data);
1068 return -EAGAIN;
1069 case -EAGAIN:
1070 rpc_restart_call_prepare(task);
1071 return -EAGAIN;
1072 }
1073
1074 if (data->verf.committed == NFS_UNSTABLE)
1075		pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
1076
1077 return 0;
1078}
1079
1080static int ff_layout_write_prepare_common(struct rpc_task *task,
1081 struct nfs_pgio_header *hdr)
1082{
1083 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1084 rpc_exit(task, -EIO);
1085 return -EIO;
1086 }
1087
1088 if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
1089 bool retry_pnfs;
1090
1091 retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
1092 dprintk("%s task %u reset io to %s\n", __func__,
1093 task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
1094 ff_layout_reset_write(hdr, retry_pnfs);
1095 rpc_exit(task, 0);
1096 return -EAGAIN;
1097 }
1098
1099 return 0;
1100}
1101
1102static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1103{
1104 struct nfs_pgio_header *hdr = data;
1105
1106 if (ff_layout_write_prepare_common(task, hdr))
1107 return;
1108
1109 rpc_call_start(task);
1110}
1111
1112static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1113{
1114 struct nfs_pgio_header *hdr = data;
1115
1116 if (ff_layout_write_prepare_common(task, hdr))
1117 return;
1118
1119 if (ff_layout_setup_sequence(hdr->ds_clp,
1120 &hdr->args.seq_args,
1121 &hdr->res.seq_res,
1122 task))
1123 return;
1124
1125 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1126 hdr->args.lock_context, FMODE_WRITE) == -EIO)
1127 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1128}
1129
1130static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1131{
1132 struct nfs_pgio_header *hdr = data;
1133
1134 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1135 task->tk_status == 0) {
1136 nfs4_sequence_done(task, &hdr->res.seq_res);
1137 return;
1138 }
1139
1140 /* Note this may cause RPC to be resent */
1141 hdr->mds_ops->rpc_call_done(task, hdr);
1142}
1143
1144static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1145{
1146 struct nfs_pgio_header *hdr = data;
1147
1148 rpc_count_iostats_metrics(task,
1149 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1150}
1151
1152static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1153{
1154 rpc_call_start(task);
1155}
1156
1157static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1158{
1159 struct nfs_commit_data *wdata = data;
1160
1161 ff_layout_setup_sequence(wdata->ds_clp,
1162 &wdata->args.seq_args,
1163 &wdata->res.seq_res,
1164 task);
1165}
1166
1167static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1168{
1169 struct nfs_commit_data *cdata = data;
1170
1171 rpc_count_iostats_metrics(task,
1172 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1173}
1174
1175static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1176 .rpc_call_prepare = ff_layout_read_prepare_v3,
1177 .rpc_call_done = ff_layout_read_call_done,
1178 .rpc_count_stats = ff_layout_read_count_stats,
1179 .rpc_release = pnfs_generic_rw_release,
1180};
1181
1182static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1183 .rpc_call_prepare = ff_layout_read_prepare_v4,
1184 .rpc_call_done = ff_layout_read_call_done,
1185 .rpc_count_stats = ff_layout_read_count_stats,
1186 .rpc_release = pnfs_generic_rw_release,
1187};
1188
1189static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1190 .rpc_call_prepare = ff_layout_write_prepare_v3,
1191 .rpc_call_done = ff_layout_write_call_done,
1192 .rpc_count_stats = ff_layout_write_count_stats,
1193 .rpc_release = pnfs_generic_rw_release,
1194};
1195
1196static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1197 .rpc_call_prepare = ff_layout_write_prepare_v4,
1198 .rpc_call_done = ff_layout_write_call_done,
1199 .rpc_count_stats = ff_layout_write_count_stats,
1200 .rpc_release = pnfs_generic_rw_release,
1201};
1202
1203static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1204 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1205 .rpc_call_done = pnfs_generic_write_commit_done,
1206 .rpc_count_stats = ff_layout_commit_count_stats,
1207 .rpc_release = pnfs_generic_commit_release,
1208};
1209
1210static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1211 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1212 .rpc_call_done = pnfs_generic_write_commit_done,
1213 .rpc_count_stats = ff_layout_commit_count_stats,
1214 .rpc_release = pnfs_generic_commit_release,
1215};
1216
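/*
 * Issue an asynchronous READ to the selected mirror: prepare the DS,
 * find or create its RPC client and credential, pick the DS file handle,
 * and start the RPC with the v3 or v4 call ops.  On failure, return
 * PNFS_TRY_AGAIN while another DS is still available, otherwise
 * PNFS_NOT_ATTEMPTED so the read goes through the MDS.
 */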
1217static enum pnfs_try_status
1218ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1219{
1220 struct pnfs_layout_segment *lseg = hdr->lseg;
1221 struct nfs4_pnfs_ds *ds;
1222 struct rpc_clnt *ds_clnt;
1223 struct rpc_cred *ds_cred;
1224 loff_t offset = hdr->args.offset;
1225 u32 idx = hdr->pgio_mirror_idx;
1226 int vers;
1227 struct nfs_fh *fh;
1228
1229 dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
1230 __func__, hdr->inode->i_ino,
1231 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1232
1233 ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
1234 if (!ds)
1235 goto out_failed;
1236
1237 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1238 hdr->inode);
1239 if (IS_ERR(ds_clnt))
1240 goto out_failed;
1241
1242 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1243 if (IS_ERR(ds_cred))
1244 goto out_failed;
1245
1246 vers = nfs4_ff_layout_ds_version(lseg, idx);
1247
1248 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1249 ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
1250
1251 atomic_inc(&ds->ds_clp->cl_count);
1252 hdr->ds_clp = ds->ds_clp;
1253 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1254 if (fh)
1255 hdr->args.fh = fh;
1256
1257 /*
1258 * Note that if we ever decide to split across DSes,
1259 * then we may need to handle dense-like offsets.
1260 */
1261 hdr->args.offset = offset;
1262 hdr->mds_offset = offset;
1263
1264 /* Perform an asynchronous read to ds */
1265 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1266 vers == 3 ? &ff_layout_read_call_ops_v3 :
1267 &ff_layout_read_call_ops_v4,
1268 0, RPC_TASK_SOFTCONN);
1269
1270 return PNFS_ATTEMPTED;
1271
1272out_failed:
1273 if (ff_layout_has_available_ds(lseg))
1274 return PNFS_TRY_AGAIN;
1275 return PNFS_NOT_ATTEMPTED;
1276}
1277
1278/* Perform async writes. */
1279static enum pnfs_try_status
1280ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1281{
1282 struct pnfs_layout_segment *lseg = hdr->lseg;
1283 struct nfs4_pnfs_ds *ds;
1284 struct rpc_clnt *ds_clnt;
1285 struct rpc_cred *ds_cred;
1286 loff_t offset = hdr->args.offset;
1287 int vers;
1288 struct nfs_fh *fh;
1289 int idx = hdr->pgio_mirror_idx;
1290
1291 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1292 if (!ds)
1293 return PNFS_NOT_ATTEMPTED;
1294
1295 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1296 hdr->inode);
1297 if (IS_ERR(ds_clnt))
1298 return PNFS_NOT_ATTEMPTED;
1299
1300 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1301 if (IS_ERR(ds_cred))
1302 return PNFS_NOT_ATTEMPTED;
1303
1304 vers = nfs4_ff_layout_ds_version(lseg, idx);
1305
1306 dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
1307 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1308 offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
1309 vers);
1310
1311 hdr->pgio_done_cb = ff_layout_write_done_cb;
1312 atomic_inc(&ds->ds_clp->cl_count);
1313 hdr->ds_clp = ds->ds_clp;
1314 hdr->ds_commit_idx = idx;
1315 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1316 if (fh)
1317 hdr->args.fh = fh;
1318
1319 /*
1320 * Note that if we ever decide to split across DSes,
1321 * then we may need to handle dense-like offsets.
1322 */
1323 hdr->args.offset = offset;
1324
1325 /* Perform an asynchronous write */
1326 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1327 vers == 3 ? &ff_layout_write_call_ops_v3 :
1328 &ff_layout_write_call_ops_v4,
1329 sync, RPC_TASK_SOFTCONN);
1330 return PNFS_ATTEMPTED;
1331}
1332
1333static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1334{
1335 return i;
1336}
1337
1338static struct nfs_fh *
1339select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1340{
1341 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1342
1343 /* FIXME: Assume that there is only one NFS version available
1344 * for the DS.
1345 */
1346 return &flseg->mirror_array[i]->fh_versions[0];
1347}
1348
1349static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1350{
1351 struct pnfs_layout_segment *lseg = data->lseg;
1352 struct nfs4_pnfs_ds *ds;
1353 struct rpc_clnt *ds_clnt;
1354 struct rpc_cred *ds_cred;
1355 u32 idx;
1356 int vers;
1357 struct nfs_fh *fh;
1358
1359 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1360 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1361 if (!ds)
1362 goto out_err;
1363
1364 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1365 data->inode);
1366 if (IS_ERR(ds_clnt))
1367 goto out_err;
1368
1369 ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
1370 if (IS_ERR(ds_cred))
1371 goto out_err;
1372
1373 vers = nfs4_ff_layout_ds_version(lseg, idx);
1374
1375 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1376 data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
1377 vers);
1378 data->commit_done_cb = ff_layout_commit_done_cb;
1379 data->cred = ds_cred;
1380 atomic_inc(&ds->ds_clp->cl_count);
1381 data->ds_clp = ds->ds_clp;
1382 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1383 if (fh)
1384 data->args.fh = fh;
1385 return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1386 vers == 3 ? &ff_layout_commit_call_ops_v3 :
1387 &ff_layout_commit_call_ops_v4,
1388 how, RPC_TASK_SOFTCONN);
1389out_err:
1390 pnfs_generic_prepare_to_resend_writes(data);
1391 pnfs_generic_commit_release(data);
1392 return -EAGAIN;
1393}
1394
1395static int
1396ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1397 int how, struct nfs_commit_info *cinfo)
1398{
1399 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1400 ff_layout_initiate_commit);
1401}
1402
1403static struct pnfs_ds_commit_info *
1404ff_layout_get_ds_info(struct inode *inode)
1405{
1406 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1407
1408 if (layout == NULL)
1409 return NULL;
1410
1411 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1412}
1413
1414static void
1415ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1416{
1417 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1418 id_node));
1419}
1420
1421static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
1422 struct xdr_stream *xdr,
1423 const struct nfs4_layoutreturn_args *args)
1424{
1425 struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
1426 __be32 *start;
1427 int count = 0, ret = 0;
1428
1429 start = xdr_reserve_space(xdr, 4);
1430 if (unlikely(!start))
1431 return -E2BIG;
1432
1433	/* This assumes we always return _ALL_ layouts */
1434 spin_lock(&hdr->plh_inode->i_lock);
1435 ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
1436 spin_unlock(&hdr->plh_inode->i_lock);
1437
1438 *start = cpu_to_be32(count);
1439
1440 return ret;
1441}
1442
1443/* report nothing for now */
1444static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
1445 struct xdr_stream *xdr,
1446 const struct nfs4_layoutreturn_args *args)
1447{
1448 __be32 *p;
1449
1450 p = xdr_reserve_space(xdr, 4);
1451 if (likely(p))
1452 *p = cpu_to_be32(0);
1453}
1454
1455static struct nfs4_deviceid_node *
1456ff_layout_alloc_deviceid_node(struct nfs_server *server,
1457 struct pnfs_device *pdev, gfp_t gfp_flags)
1458{
1459 struct nfs4_ff_layout_ds *dsaddr;
1460
1461 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
1462 if (!dsaddr)
1463 return NULL;
1464 return &dsaddr->id_node;
1465}
1466
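/*
 * Encode the LAYOUTRETURN body: reserve space for the opaque length,
 * emit the per-DS I/O error list and the (currently empty) iostats
 * array, then backfill the length once the final size is known.
 */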
1467static void
1468ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
1469 struct xdr_stream *xdr,
1470 const struct nfs4_layoutreturn_args *args)
1471{
1472 struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
1473 __be32 *start;
1474
1475 dprintk("%s: Begin\n", __func__);
1476 start = xdr_reserve_space(xdr, 4);
1477 BUG_ON(!start);
1478
1479 if (ff_layout_encode_ioerr(flo, xdr, args))
1480 goto out;
1481
1482 ff_layout_encode_iostats(flo, xdr, args);
1483out:
1484 *start = cpu_to_be32((xdr->p - start - 1) * 4);
1485 dprintk("%s: Return\n", __func__);
1486}
1487
1488static struct pnfs_layoutdriver_type flexfilelayout_type = {
1489 .id = LAYOUT_FLEX_FILES,
1490 .name = "LAYOUT_FLEX_FILES",
1491 .owner = THIS_MODULE,
1492 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
1493 .free_layout_hdr = ff_layout_free_layout_hdr,
1494 .alloc_lseg = ff_layout_alloc_lseg,
1495 .free_lseg = ff_layout_free_lseg,
1496 .pg_read_ops = &ff_layout_pg_read_ops,
1497 .pg_write_ops = &ff_layout_pg_write_ops,
1498 .get_ds_info = ff_layout_get_ds_info,
1499	.free_deviceid_node = ff_layout_free_deviceid_node,
1500	.mark_request_commit = pnfs_layout_mark_request_commit,
1501	.clear_request_commit = pnfs_generic_clear_request_commit,
1502 .scan_commit_lists = pnfs_generic_scan_commit_lists,
1503 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
1504 .commit_pagelist = ff_layout_commit_pagelist,
1505 .read_pagelist = ff_layout_read_pagelist,
1506 .write_pagelist = ff_layout_write_pagelist,
1507 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
1508 .encode_layoutreturn = ff_layout_encode_layoutreturn,
1509	.sync = pnfs_nfs_generic_sync,
1510};
1511
1512static int __init nfs4flexfilelayout_init(void)
1513{
1514 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
1515 __func__);
1516 return pnfs_register_layoutdriver(&flexfilelayout_type);
1517}
1518
1519static void __exit nfs4flexfilelayout_exit(void)
1520{
1521 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
1522 __func__);
1523 pnfs_unregister_layoutdriver(&flexfilelayout_type);
1524}
1525
1526MODULE_ALIAS("nfs-layouttype4-4");
1527
1528MODULE_LICENSE("GPL");
1529MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
1530
1531module_init(nfs4flexfilelayout_init);
1532module_exit(nfs4flexfilelayout_exit);