1/*
2 * Module for pnfs flexfile layout driver.
3 *
4 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
5 *
6 * Tao Peng <bergwolf@primarydata.com>
7 */
8
9#include <linux/nfs_fs.h>
10#include <linux/nfs_page.h>
11#include <linux/module.h>
12
13#include <linux/sunrpc/metrics.h>
14
15#include "flexfilelayout.h"
16#include "../nfs4session.h"
17#include "../nfs4idmap.h"
18#include "../internal.h"
19#include "../delegation.h"
20#include "../nfs4trace.h"
21#include "../iostat.h"
22#include "../nfs.h"
23
24#define NFSDBG_FACILITY NFSDBG_PNFS_LD
25
26#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
27
28static struct pnfs_layout_hdr *
29ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
30{
31 struct nfs4_flexfile_layout *ffl;
32
33 ffl = kzalloc(sizeof(*ffl), gfp_flags);
34 if (ffl) {
35 INIT_LIST_HEAD(&ffl->error_list);
36 return &ffl->generic_hdr;
37 } else
38 return NULL;
39}
40
41static void
42ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
43{
44 struct nfs4_ff_layout_ds_err *err, *n;
45
46 list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
47 list) {
48 list_del(&err->list);
49 kfree(err);
50 }
51 kfree(FF_LAYOUT_FROM_HDR(lo));
52}
53
54static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
55{
56 __be32 *p;
57
58 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
59 if (unlikely(p == NULL))
60 return -ENOBUFS;
61 memcpy(stateid, p, NFS4_STATEID_SIZE);
62 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
63 p[0], p[1], p[2], p[3]);
64 return 0;
65}
66
67static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
68{
69 __be32 *p;
70
71 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
72 if (unlikely(!p))
73 return -ENOBUFS;
74 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
75 nfs4_print_deviceid(devid);
76 return 0;
77}
78
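/*
 * Decode an XDR-encoded filehandle: a 4-byte length followed by that
 * many bytes of opaque data, bounds-checked against struct nfs_fh.
 */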
79static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
80{
81 __be32 *p;
82
83 p = xdr_inline_decode(xdr, 4);
84 if (unlikely(!p))
85 return -ENOBUFS;
86 fh->size = be32_to_cpup(p++);
87 if (fh->size > sizeof(struct nfs_fh)) {
88 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
89 fh->size);
90 return -EOVERFLOW;
91 }
92 /* fh.data */
93 p = xdr_inline_decode(xdr, fh->size);
94 if (unlikely(!p))
95 return -ENOBUFS;
96 memcpy(&fh->data, p, fh->size);
97 dprintk("%s: fh len %d\n", __func__, fh->size);
98
99 return 0;
100}
101
102/*
103 * Currently only stringified uids and gids are accepted.
104 * I.e., Kerberos is not supported to the DSes, so no principals.
105 *
106 * That means that one common function will suffice, but when
107 * principals are added, this should be split to accommodate
108 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
109 */
110static int
111decode_name(struct xdr_stream *xdr, u32 *id)
112{
113 __be32 *p;
114 int len;
115
116 /* opaque_length(4)*/
117 p = xdr_inline_decode(xdr, 4);
118 if (unlikely(!p))
119 return -ENOBUFS;
120 len = be32_to_cpup(p++);
121 if (len < 0)
122 return -EINVAL;
123
124 dprintk("%s: len %u\n", __func__, len);
125
126 /* opaque body */
127 p = xdr_inline_decode(xdr, len);
128 if (unlikely(!p))
129 return -ENOBUFS;
130
131 if (!nfs_map_string_to_numeric((char *)p, len, id))
132 return -EINVAL;
133
134 return 0;
135}
136
137static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
138{
139 int i;
140
141 if (fls->mirror_array) {
142 for (i = 0; i < fls->mirror_array_cnt; i++) {
143 /* normally mirror_ds is freed in
144 * .free_deviceid_node but we still do it here
145 * for .alloc_lseg error path */
146 if (fls->mirror_array[i]) {
147 kfree(fls->mirror_array[i]->fh_versions);
148 nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
149 kfree(fls->mirror_array[i]);
150 }
151 }
152 kfree(fls->mirror_array);
153 fls->mirror_array = NULL;
154 }
155}
156
157static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
158{
159 int ret = 0;
160
161 dprintk("--> %s\n", __func__);
162
163 /* FIXME: remove this check when layout segment support is added */
164 if (lgr->range.offset != 0 ||
165 lgr->range.length != NFS4_MAX_UINT64) {
166 dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
167 __func__);
168 ret = -EINVAL;
169 }
170
171 dprintk("--> %s returns %d\n", __func__, ret);
172 return ret;
173}
174
175static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
176{
177 if (fls) {
178 ff_layout_free_mirror_array(fls);
179 kfree(fls);
180 }
181}
182
183static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
184{
185	int i, j;
186
187 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
188 for (j = i + 1; j < fls->mirror_array_cnt; j++)
189 if (fls->mirror_array[i]->efficiency <
190			    fls->mirror_array[j]->efficiency)
191 swap(fls->mirror_array[i],
192 fls->mirror_array[j]);
193	}
194}
195
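/*
 * Decode a flexfiles layout body: the stripe unit and mirror count,
 * then for each mirror the data server count, deviceid, efficiency,
 * stateid, filehandle array and stringified uid/gid. Mirrors are then
 * sorted by efficiency and the layout is checked to cover the whole file.
 */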
196static struct pnfs_layout_segment *
197ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
198 struct nfs4_layoutget_res *lgr,
199 gfp_t gfp_flags)
200{
201 struct pnfs_layout_segment *ret;
202 struct nfs4_ff_layout_segment *fls = NULL;
203 struct xdr_stream stream;
204 struct xdr_buf buf;
205 struct page *scratch;
206 u64 stripe_unit;
207 u32 mirror_array_cnt;
208 __be32 *p;
209 int i, rc;
210
211 dprintk("--> %s\n", __func__);
212 scratch = alloc_page(gfp_flags);
213 if (!scratch)
214 return ERR_PTR(-ENOMEM);
215
216 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
217 lgr->layoutp->len);
218 xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
219
220 /* stripe unit and mirror_array_cnt */
221 rc = -EIO;
222 p = xdr_inline_decode(&stream, 8 + 4);
223 if (!p)
224 goto out_err_free;
225
226 p = xdr_decode_hyper(p, &stripe_unit);
227 mirror_array_cnt = be32_to_cpup(p++);
228 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
229 stripe_unit, mirror_array_cnt);
230
231 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
232 mirror_array_cnt == 0)
233 goto out_err_free;
234
235 rc = -ENOMEM;
236 fls = kzalloc(sizeof(*fls), gfp_flags);
237 if (!fls)
238 goto out_err_free;
239
240 fls->mirror_array_cnt = mirror_array_cnt;
241 fls->stripe_unit = stripe_unit;
242 fls->mirror_array = kcalloc(fls->mirror_array_cnt,
243 sizeof(fls->mirror_array[0]), gfp_flags);
244 if (fls->mirror_array == NULL)
245 goto out_err_free;
246
247 for (i = 0; i < fls->mirror_array_cnt; i++) {
248 struct nfs4_deviceid devid;
249 struct nfs4_deviceid_node *idnode;
250 u32 ds_count;
251 u32 fh_count;
252 int j;
253
254 rc = -EIO;
255 p = xdr_inline_decode(&stream, 4);
256 if (!p)
257 goto out_err_free;
258 ds_count = be32_to_cpup(p);
259
260 /* FIXME: allow for striping? */
261 if (ds_count != 1)
262 goto out_err_free;
263
264 fls->mirror_array[i] =
265 kzalloc(sizeof(struct nfs4_ff_layout_mirror),
266 gfp_flags);
267 if (fls->mirror_array[i] == NULL) {
268 rc = -ENOMEM;
269 goto out_err_free;
270 }
271
272 spin_lock_init(&fls->mirror_array[i]->lock);
273 fls->mirror_array[i]->ds_count = ds_count;
274
275 /* deviceid */
276 rc = decode_deviceid(&stream, &devid);
277 if (rc)
278 goto out_err_free;
279
280 idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
281 &devid, lh->plh_lc_cred,
282 gfp_flags);
283 /*
284		 * Upon success, mirror_ds was allocated by a previous
285		 * GETDEVICEINFO, or newly by .alloc_deviceid_node; an
286		 * nfs4_find_get_deviceid() failure is in fact a GETDEVICEINFO failure.
287 */
288 if (idnode)
289 fls->mirror_array[i]->mirror_ds =
290 FF_LAYOUT_MIRROR_DS(idnode);
291 else
292 goto out_err_free;
293
294 /* efficiency */
295 rc = -EIO;
296 p = xdr_inline_decode(&stream, 4);
297 if (!p)
298 goto out_err_free;
299 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
300
301 /* stateid */
302 rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
303 if (rc)
304 goto out_err_free;
305
306 /* fh */
307 p = xdr_inline_decode(&stream, 4);
308 if (!p)
309 goto out_err_free;
310 fh_count = be32_to_cpup(p);
311
312 fls->mirror_array[i]->fh_versions =
313 kzalloc(fh_count * sizeof(struct nfs_fh),
314 gfp_flags);
315 if (fls->mirror_array[i]->fh_versions == NULL) {
316 rc = -ENOMEM;
317 goto out_err_free;
318 }
319
320 for (j = 0; j < fh_count; j++) {
321 rc = decode_nfs_fh(&stream,
322 &fls->mirror_array[i]->fh_versions[j]);
323 if (rc)
324 goto out_err_free;
325 }
326
327 fls->mirror_array[i]->fh_versions_cnt = fh_count;
328
329 /* user */
330 rc = decode_name(&stream, &fls->mirror_array[i]->uid);
331 if (rc)
332 goto out_err_free;
333
334 /* group */
335 rc = decode_name(&stream, &fls->mirror_array[i]->gid);
336 if (rc)
337 goto out_err_free;
338
339 dprintk("%s: uid %d gid %d\n", __func__,
340 fls->mirror_array[i]->uid,
341 fls->mirror_array[i]->gid);
342 }
343
344 ff_layout_sort_mirrors(fls);
345 rc = ff_layout_check_layout(lgr);
346 if (rc)
347 goto out_err_free;
348
349 ret = &fls->generic_hdr;
350 dprintk("<-- %s (success)\n", __func__);
351out_free_page:
352 __free_page(scratch);
353 return ret;
354out_err_free:
355 _ff_layout_free_lseg(fls);
356 ret = ERR_PTR(rc);
357 dprintk("<-- %s (%d)\n", __func__, rc);
358 goto out_free_page;
359}
360
361static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
362{
363 struct pnfs_layout_segment *lseg;
364
365 list_for_each_entry(lseg, &layout->plh_segs, pls_list)
366 if (lseg->pls_range.iomode == IOMODE_RW)
367 return true;
368
369 return false;
370}
371
372static void
373ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
374{
375 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
376 int i;
377
378 dprintk("--> %s\n", __func__);
379
380 for (i = 0; i < fls->mirror_array_cnt; i++) {
381 if (fls->mirror_array[i]) {
382 nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
383 fls->mirror_array[i]->mirror_ds = NULL;
384 if (fls->mirror_array[i]->cred) {
385 put_rpccred(fls->mirror_array[i]->cred);
386 fls->mirror_array[i]->cred = NULL;
387 }
388 }
389 }
390
391 if (lseg->pls_range.iomode == IOMODE_RW) {
392 struct nfs4_flexfile_layout *ffl;
393 struct inode *inode;
394
395 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
396 inode = ffl->generic_hdr.plh_inode;
397 spin_lock(&inode->i_lock);
398 if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
399 ffl->commit_info.nbuckets = 0;
400 kfree(ffl->commit_info.buckets);
401 ffl->commit_info.buckets = NULL;
402 }
403 spin_unlock(&inode->i_lock);
404 }
405 _ff_layout_free_lseg(fls);
406}
407
408/* Return 1 until we have multiple lsegs support */
409static int
410ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
411{
412 return 1;
413}
414
415static void
416nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer)
417{
418 ktime_t old, new;
419
420 /*
421 * Note: careful here!
422 * If the counter is zero, then we must not increment it until after
423 * we've set the start_time.
424 * If we were instead to use atomic_inc_return(), then another
425 * request might come in, bump, and then call end_busy_timer()
426 * before we've set the timer->start_time.
427 */
428 old = timer->start_time;
429 if (atomic_inc_not_zero(&timer->n_ops) == 0) {
430 new = ktime_get();
431 cmpxchg(&timer->start_time.tv64, old.tv64, new.tv64);
432 atomic_inc(&timer->n_ops);
433 }
434}
435
436static ktime_t
437nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer)
438{
439 ktime_t start, now;
440
441 now = ktime_get();
442 start.tv64 = xchg(&timer->start_time.tv64, now.tv64);
443 atomic_dec(&timer->n_ops);
444 return ktime_sub(now, start);
445}
446
447static ktime_t
448nfs4_ff_layout_calc_completion_time(struct rpc_task *task)
449{
450 return ktime_sub(ktime_get(), task->tk_start);
451}
452
453static void
454nfs4_ff_layoutstat_start_io(struct nfs4_ff_layoutstat *layoutstat)
455{
456 nfs4_ff_start_busy_timer(&layoutstat->busy_timer);
457}
458
459static void
460nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
461 __u64 requested)
462{
463 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
464
465 iostat->ops_requested++;
466 iostat->bytes_requested += requested;
467}
468
469static void
470nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
471 __u64 requested,
472 __u64 completed,
473 ktime_t time_completed)
474{
475 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
476 ktime_t timer;
477
478 iostat->ops_completed++;
479 iostat->bytes_completed += completed;
480 iostat->bytes_not_delivered += requested - completed;
481
482 timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer);
483 iostat->total_busy_time =
484 ktime_add(iostat->total_busy_time, timer);
485 iostat->aggregate_completion_time =
486 ktime_add(iostat->aggregate_completion_time, time_completed);
487}
488
489static void
490nfs4_ff_layout_stat_io_start_read(struct nfs4_ff_layout_mirror *mirror,
491 __u64 requested)
492{
493 spin_lock(&mirror->lock);
494 nfs4_ff_layoutstat_start_io(&mirror->read_stat);
495 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
496 spin_unlock(&mirror->lock);
497}
498
499static void
500nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
501 struct nfs4_ff_layout_mirror *mirror,
502 __u64 requested,
503 __u64 completed)
504{
505 spin_lock(&mirror->lock);
506 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
507 requested, completed,
508 nfs4_ff_layout_calc_completion_time(task));
509 spin_unlock(&mirror->lock);
510}
511
512static void
513nfs4_ff_layout_stat_io_start_write(struct nfs4_ff_layout_mirror *mirror,
514 __u64 requested)
515{
516 spin_lock(&mirror->lock);
517 nfs4_ff_layoutstat_start_io(&mirror->write_stat);
518 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
519 spin_unlock(&mirror->lock);
520}
521
522static void
523nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
524 struct nfs4_ff_layout_mirror *mirror,
525 __u64 requested,
526 __u64 completed,
527 enum nfs3_stable_how committed)
528{
529 if (committed == NFS_UNSTABLE)
530 requested = completed = 0;
531
532 spin_lock(&mirror->lock);
533 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
534 requested, completed,
535 nfs4_ff_layout_calc_completion_time(task));
536 spin_unlock(&mirror->lock);
537}
538
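/*
 * Allocate one commit bucket per (lseg, mirror) pair and install the
 * array under cinfo->lock. If another thread raced us and the buckets
 * already exist, the new allocation is discarded and 0 is returned.
 */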
539static int
540ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
541 struct nfs_commit_info *cinfo,
542 gfp_t gfp_flags)
543{
544 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
545 struct pnfs_commit_bucket *buckets;
546 int size;
547
548 if (cinfo->ds->nbuckets != 0) {
549 /* This assumes there is only one RW lseg per file.
550 * To support multiple lseg per file, we need to
551 * change struct pnfs_commit_bucket to allow dynamic
552 * increasing nbuckets.
553 */
554 return 0;
555 }
556
557 size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
558
559 buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
560 gfp_flags);
561 if (!buckets)
562 return -ENOMEM;
563 else {
564 int i;
565
566 spin_lock(cinfo->lock);
567 if (cinfo->ds->nbuckets != 0)
568 kfree(buckets);
569 else {
570 cinfo->ds->buckets = buckets;
571 cinfo->ds->nbuckets = size;
572 for (i = 0; i < size; i++) {
573 INIT_LIST_HEAD(&buckets[i].written);
574 INIT_LIST_HEAD(&buckets[i].committing);
575 /* mark direct verifier as unset */
576 buckets[i].direct_verf.committed =
577 NFS_INVALID_STABLE_HOW;
578 }
579 }
580 spin_unlock(cinfo->lock);
581 return 0;
582 }
583}
584
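/*
 * Walk the mirrors (sorted by efficiency) and return the first data
 * server that can be prepared; its mirror index is returned in *best_idx.
 */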
585static struct nfs4_pnfs_ds *
586ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
587 int *best_idx)
588{
589 struct nfs4_ff_layout_segment *fls;
590 struct nfs4_pnfs_ds *ds;
591 int idx;
592
593 fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
594 /* mirrors are sorted by efficiency */
595 for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
596 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
597 if (ds) {
598 *best_idx = idx;
599 return ds;
600 }
601 }
602
603 return NULL;
604}
605
606static void
607ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
608 struct nfs_page *req)
609{
610 struct nfs_pgio_mirror *pgm;
611 struct nfs4_ff_layout_mirror *mirror;
612 struct nfs4_pnfs_ds *ds;
613 int ds_idx;
614
615 /* Use full layout for now */
616 if (!pgio->pg_lseg)
617 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
618 req->wb_context,
619 0,
620 NFS4_MAX_UINT64,
621 IOMODE_READ,
622 GFP_KERNEL);
623 /* If no lseg, fall back to read through mds */
624 if (pgio->pg_lseg == NULL)
625 goto out_mds;
626
627 ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
628 if (!ds)
629 goto out_mds;
630 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
631
632 pgio->pg_mirror_idx = ds_idx;
633
634 /* read always uses only one mirror - idx 0 for pgio layer */
635 pgm = &pgio->pg_mirrors[0];
636 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
637
638 return;
639out_mds:
640 pnfs_put_lseg(pgio->pg_lseg);
641 pgio->pg_lseg = NULL;
642 nfs_pageio_reset_read_mds(pgio);
643}
644
645static void
646ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
647 struct nfs_page *req)
648{
649 struct nfs4_ff_layout_mirror *mirror;
650 struct nfs_pgio_mirror *pgm;
651 struct nfs_commit_info cinfo;
652 struct nfs4_pnfs_ds *ds;
653 int i;
654 int status;
655
656 if (!pgio->pg_lseg)
657 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
658 req->wb_context,
659 0,
660 NFS4_MAX_UINT64,
661 IOMODE_RW,
662 GFP_NOFS);
663 /* If no lseg, fall back to write through mds */
664 if (pgio->pg_lseg == NULL)
665 goto out_mds;
666
667 nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
668 status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
669 if (status < 0)
670 goto out_mds;
671
672 /* Use a direct mapping of ds_idx to pgio mirror_idx */
673 if (WARN_ON_ONCE(pgio->pg_mirror_count !=
674 FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
675 goto out_mds;
676
677 for (i = 0; i < pgio->pg_mirror_count; i++) {
678 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
679 if (!ds)
680 goto out_mds;
681 pgm = &pgio->pg_mirrors[i];
682 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
683 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
684 }
685
686 return;
687
688out_mds:
689 pnfs_put_lseg(pgio->pg_lseg);
690 pgio->pg_lseg = NULL;
691 nfs_pageio_reset_write_mds(pgio);
692}
693
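/*
 * Return the number of mirrors to write through. If no RW layout
 * segment can be obtained, reset the pageio descriptor to the MDS
 * and report a single (non-mirrored) destination.
 */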
694static unsigned int
695ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
696 struct nfs_page *req)
697{
698 if (!pgio->pg_lseg)
699 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
700 req->wb_context,
701 0,
702 NFS4_MAX_UINT64,
703 IOMODE_RW,
704 GFP_NOFS);
705 if (pgio->pg_lseg)
706 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
707
708 /* no lseg means that pnfs is not in use, so no mirroring here */
709 pnfs_put_lseg(pgio->pg_lseg);
710 pgio->pg_lseg = NULL;
711 nfs_pageio_reset_write_mds(pgio);
712 return 1;
713}
714
715static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
716 .pg_init = ff_layout_pg_init_read,
717 .pg_test = pnfs_generic_pg_test,
718 .pg_doio = pnfs_generic_pg_readpages,
719 .pg_cleanup = pnfs_generic_pg_cleanup,
720};
721
722static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
723 .pg_init = ff_layout_pg_init_write,
724 .pg_test = pnfs_generic_pg_test,
725 .pg_doio = pnfs_generic_pg_writepages,
726 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
727 .pg_cleanup = pnfs_generic_pg_cleanup,
728};
729
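/*
 * Re-drive a failed write: with retry_pnfs the pages are marked for
 * resending through pNFS (via the open context or the direct-I/O
 * request), otherwise the whole request is resent through the MDS.
 */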
730static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
731{
732 struct rpc_task *task = &hdr->task;
733
734 pnfs_layoutcommit_inode(hdr->inode, false);
735
736 if (retry_pnfs) {
737 dprintk("%s Reset task %5u for i/o through pNFS "
738 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
739 hdr->task.tk_pid,
740 hdr->inode->i_sb->s_id,
741 (unsigned long long)NFS_FILEID(hdr->inode),
742 hdr->args.count,
743 (unsigned long long)hdr->args.offset);
744
745 if (!hdr->dreq) {
746 struct nfs_open_context *ctx;
747
748 ctx = nfs_list_entry(hdr->pages.next)->wb_context;
749 set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
750 hdr->completion_ops->error_cleanup(&hdr->pages);
751 } else {
752 nfs_direct_set_resched_writes(hdr->dreq);
753 /* fake unstable write to let common nfs resend pages */
754 hdr->verf.committed = NFS_UNSTABLE;
755 hdr->good_bytes = 0;
756 }
757 return;
758 }
759
760 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
761 dprintk("%s Reset task %5u for i/o through MDS "
762 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
763 hdr->task.tk_pid,
764 hdr->inode->i_sb->s_id,
765 (unsigned long long)NFS_FILEID(hdr->inode),
766 hdr->args.count,
767 (unsigned long long)hdr->args.offset);
768
769 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
770 }
771}
772
773static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
774{
775 struct rpc_task *task = &hdr->task;
776
777 pnfs_layoutcommit_inode(hdr->inode, false);
778
779 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
780 dprintk("%s Reset task %5u for i/o through MDS "
781 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
782 hdr->task.tk_pid,
783 hdr->inode->i_sb->s_id,
784 (unsigned long long)NFS_FILEID(hdr->inode),
785 hdr->args.count,
786 (unsigned long long)hdr->args.offset);
787
788 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
789 }
790}
791
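/*
 * Handle an NFSv4 error from a data server. Returns -EAGAIN to retry
 * the RPC (after scheduling any recovery or delay), -NFS4ERR_RESET_TO_PNFS
 * or -NFS4ERR_RESET_TO_MDS to redirect the I/O, or 0 when there is no
 * error to handle or the stateid is unrecoverable (tk_status set to -EIO).
 */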
792static int ff_layout_async_handle_error_v4(struct rpc_task *task,
793 struct nfs4_state *state,
794 struct nfs_client *clp,
795 struct pnfs_layout_segment *lseg,
796 int idx)
797{
798 struct pnfs_layout_hdr *lo = lseg->pls_layout;
799 struct inode *inode = lo->plh_inode;
800 struct nfs_server *mds_server = NFS_SERVER(inode);
801
802 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
803 struct nfs_client *mds_client = mds_server->nfs_client;
804 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
805
806 if (task->tk_status >= 0)
807 return 0;
808
809 switch (task->tk_status) {
810 /* MDS state errors */
811 case -NFS4ERR_DELEG_REVOKED:
812 case -NFS4ERR_ADMIN_REVOKED:
813 case -NFS4ERR_BAD_STATEID:
814 if (state == NULL)
815 break;
816 nfs_remove_bad_delegation(state->inode);
817 case -NFS4ERR_OPENMODE:
818 if (state == NULL)
819 break;
820 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
821 goto out_bad_stateid;
822 goto wait_on_recovery;
823 case -NFS4ERR_EXPIRED:
824 if (state != NULL) {
825 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
826 goto out_bad_stateid;
827 }
828 nfs4_schedule_lease_recovery(mds_client);
829 goto wait_on_recovery;
830 /* DS session errors */
831 case -NFS4ERR_BADSESSION:
832 case -NFS4ERR_BADSLOT:
833 case -NFS4ERR_BAD_HIGH_SLOT:
834 case -NFS4ERR_DEADSESSION:
835 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
836 case -NFS4ERR_SEQ_FALSE_RETRY:
837 case -NFS4ERR_SEQ_MISORDERED:
838 dprintk("%s ERROR %d, Reset session. Exchangeid "
839 "flags 0x%x\n", __func__, task->tk_status,
840 clp->cl_exchange_flags);
841 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
842 break;
843 case -NFS4ERR_DELAY:
844 case -NFS4ERR_GRACE:
845 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
846 break;
847 case -NFS4ERR_RETRY_UNCACHED_REP:
848 break;
849 /* Invalidate Layout errors */
850 case -NFS4ERR_PNFS_NO_LAYOUT:
851 case -ESTALE: /* mapped NFS4ERR_STALE */
852 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
853 case -EISDIR: /* mapped NFS4ERR_ISDIR */
854 case -NFS4ERR_FHEXPIRED:
855 case -NFS4ERR_WRONG_TYPE:
856 dprintk("%s Invalid layout error %d\n", __func__,
857 task->tk_status);
858 /*
859 * Destroy layout so new i/o will get a new layout.
860 * Layout will not be destroyed until all current lseg
861 * references are put. Mark layout as invalid to resend failed
862 * i/o and all i/o waiting on the slot table to the MDS until
863 * layout is destroyed and a new valid layout is obtained.
864 */
865 pnfs_destroy_layout(NFS_I(inode));
866 rpc_wake_up(&tbl->slot_tbl_waitq);
867 goto reset;
868 /* RPC connection errors */
869 case -ECONNREFUSED:
870 case -EHOSTDOWN:
871 case -EHOSTUNREACH:
872 case -ENETUNREACH:
873 case -EIO:
874 case -ETIMEDOUT:
875 case -EPIPE:
876 dprintk("%s DS connection error %d\n", __func__,
877 task->tk_status);
878 nfs4_mark_deviceid_unavailable(devid);
879 rpc_wake_up(&tbl->slot_tbl_waitq);
880 /* fall through */
881 default:
882 if (ff_layout_has_available_ds(lseg))
883 return -NFS4ERR_RESET_TO_PNFS;
884reset:
885 dprintk("%s Retry through MDS. Error %d\n", __func__,
886 task->tk_status);
887 return -NFS4ERR_RESET_TO_MDS;
888 }
889out:
890 task->tk_status = 0;
891 return -EAGAIN;
892out_bad_stateid:
893 task->tk_status = -EIO;
894 return 0;
895wait_on_recovery:
896 rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
897 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
898 rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
899 goto out;
900}
901
902/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
903static int ff_layout_async_handle_error_v3(struct rpc_task *task,
904 struct pnfs_layout_segment *lseg,
905 int idx)
906{
907 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
908
909 if (task->tk_status >= 0)
910 return 0;
911
912 if (task->tk_status != -EJUKEBOX) {
913 dprintk("%s DS connection error %d\n", __func__,
914 task->tk_status);
915 nfs4_mark_deviceid_unavailable(devid);
916 if (ff_layout_has_available_ds(lseg))
917 return -NFS4ERR_RESET_TO_PNFS;
918 else
919 return -NFS4ERR_RESET_TO_MDS;
920 }
921
922 if (task->tk_status == -EJUKEBOX)
923 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
924 task->tk_status = 0;
925 rpc_restart_call(task);
926 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
927 return -EAGAIN;
928}
929
930static int ff_layout_async_handle_error(struct rpc_task *task,
931 struct nfs4_state *state,
932 struct nfs_client *clp,
933 struct pnfs_layout_segment *lseg,
934 int idx)
935{
936 int vers = clp->cl_nfs_mod->rpc_vers->number;
937
938 switch (vers) {
939 case 3:
940 return ff_layout_async_handle_error_v3(task, lseg, idx);
941 case 4:
942 return ff_layout_async_handle_error_v4(task, state, clp,
943 lseg, idx);
944 default:
945 /* should never happen */
946 WARN_ON_ONCE(1);
947 return 0;
948 }
949}
950
951static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
952 int idx, u64 offset, u64 length,
953 u32 status, int opnum)
954{
955 struct nfs4_ff_layout_mirror *mirror;
956 int err;
957
958 mirror = FF_LAYOUT_COMP(lseg, idx);
959 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
960 mirror, offset, length, status, opnum,
961 GFP_NOIO);
962 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
963}
964
965/* NFS_PROTO call done callback routines */
966
967static int ff_layout_read_done_cb(struct rpc_task *task,
968 struct nfs_pgio_header *hdr)
969{
970 struct inode *inode;
971 int err;
972
973 trace_nfs4_pnfs_read(hdr, task->tk_status);
974 if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
975 hdr->res.op_status = NFS4ERR_NXIO;
976 if (task->tk_status < 0 && hdr->res.op_status)
977 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
978 hdr->args.offset, hdr->args.count,
979 hdr->res.op_status, OP_READ);
980 err = ff_layout_async_handle_error(task, hdr->args.context->state,
981 hdr->ds_clp, hdr->lseg,
982 hdr->pgio_mirror_idx);
983
984 switch (err) {
985 case -NFS4ERR_RESET_TO_PNFS:
986 set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
987 &hdr->lseg->pls_layout->plh_flags);
988 pnfs_read_resend_pnfs(hdr);
989 return task->tk_status;
990 case -NFS4ERR_RESET_TO_MDS:
991 inode = hdr->lseg->pls_layout->plh_inode;
992 pnfs_error_mark_layout_for_return(inode, hdr->lseg);
993 ff_layout_reset_read(hdr);
994 return task->tk_status;
995 case -EAGAIN:
996 rpc_restart_call_prepare(task);
997 return -EAGAIN;
998 }
999
1000 return 0;
1001}
1002
1003/*
1004 * We reference the rpc_cred of the first WRITE that triggers the need for
1005 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1006 * rfc5661 is not clear about which credential should be used.
1007 *
1008 * The flexfiles client should treat a FILE_SYNC reply from the DS as DATA_SYNC,
1009 * so to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1010 * we always send a layoutcommit after DS writes.
1011 */
1012static void
1013ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
1014{
1015	pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
1016 hdr->mds_offset + hdr->res.count);
1017	dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
1018 (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
1019}
1020
1021static bool
1022ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
1023{
1024 /* No mirroring for now */
1025 struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
1026
1027 return ff_layout_test_devid_unavailable(node);
1028}
1029
1030static int ff_layout_read_prepare_common(struct rpc_task *task,
1031 struct nfs_pgio_header *hdr)
1032{
1033	nfs4_ff_layout_stat_io_start_read(
1034 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1035 hdr->args.count);
1036
1037	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1038 rpc_exit(task, -EIO);
1039 return -EIO;
1040 }
1041 if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
1042 dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
1043 if (ff_layout_has_available_ds(hdr->lseg))
1044 pnfs_read_resend_pnfs(hdr);
1045 else
1046 ff_layout_reset_read(hdr);
1047 rpc_exit(task, 0);
1048 return -EAGAIN;
1049 }
1050 hdr->pgio_done_cb = ff_layout_read_done_cb;
1051
1052 return 0;
1053}
1054
1055/*
1056 * Call ops for the async read/write cases
1057 * In the case of dense layouts, the offset needs to be reset to its
1058 * original value.
1059 */
1060static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1061{
1062 struct nfs_pgio_header *hdr = data;
1063
1064 if (ff_layout_read_prepare_common(task, hdr))
1065 return;
1066
1067 rpc_call_start(task);
1068}
1069
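/*
 * Set up the NFSv4 sequence for a DS RPC: use the v4.1 session if the
 * DS client has one, otherwise fall back to the v4.0 slot table.
 */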
1070static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
1071 struct nfs4_sequence_args *args,
1072 struct nfs4_sequence_res *res,
1073 struct rpc_task *task)
1074{
1075 if (ds_clp->cl_session)
1076 return nfs41_setup_sequence(ds_clp->cl_session,
1077 args,
1078 res,
1079 task);
1080 return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
1081 args,
1082 res,
1083 task);
1084}
1085
1086static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1087{
1088 struct nfs_pgio_header *hdr = data;
1089
1090	if (ff_layout_setup_sequence(hdr->ds_clp,
1091 &hdr->args.seq_args,
1092 &hdr->res.seq_res,
1093 task))
1094 return;
1095
1096	if (ff_layout_read_prepare_common(task, hdr))
1097 return;
1098
1099	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1100 hdr->args.lock_context, FMODE_READ) == -EIO)
1101 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1102}
1103
1104static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1105{
1106 struct nfs_pgio_header *hdr = data;
1107
1108 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1109
1110	nfs4_ff_layout_stat_io_end_read(task,
1111 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1112 hdr->args.count, hdr->res.count);
1113
1114	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1115 task->tk_status == 0) {
1116 nfs4_sequence_done(task, &hdr->res.seq_res);
1117 return;
1118 }
1119
1120 /* Note this may cause RPC to be resent */
1121 hdr->mds_ops->rpc_call_done(task, hdr);
1122}
1123
1124static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1125{
1126 struct nfs_pgio_header *hdr = data;
1127
1128 rpc_count_iostats_metrics(task,
1129 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1130}
1131
1132static int ff_layout_write_done_cb(struct rpc_task *task,
1133 struct nfs_pgio_header *hdr)
1134{
1135 struct inode *inode;
1136 int err;
1137
1138 trace_nfs4_pnfs_write(hdr, task->tk_status);
1139 if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
1140 hdr->res.op_status = NFS4ERR_NXIO;
1141 if (task->tk_status < 0 && hdr->res.op_status)
1142 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1143 hdr->args.offset, hdr->args.count,
1144 hdr->res.op_status, OP_WRITE);
1145 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1146 hdr->ds_clp, hdr->lseg,
1147 hdr->pgio_mirror_idx);
1148
1149 switch (err) {
1150 case -NFS4ERR_RESET_TO_PNFS:
1151 case -NFS4ERR_RESET_TO_MDS:
1152 inode = hdr->lseg->pls_layout->plh_inode;
1153 pnfs_error_mark_layout_for_return(inode, hdr->lseg);
1154 if (err == -NFS4ERR_RESET_TO_PNFS) {
1155 pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
1156 ff_layout_reset_write(hdr, true);
1157 } else {
1158 pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
1159 ff_layout_reset_write(hdr, false);
1160 }
1161 return task->tk_status;
1162 case -EAGAIN:
1163 rpc_restart_call_prepare(task);
1164 return -EAGAIN;
1165 }
1166
1167 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1168 hdr->res.verf->committed == NFS_DATA_SYNC)
1169 ff_layout_set_layoutcommit(hdr);
1170
1171 return 0;
1172}
1173
1174static int ff_layout_commit_done_cb(struct rpc_task *task,
1175 struct nfs_commit_data *data)
1176{
1177 struct inode *inode;
1178 int err;
1179
1180 trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1181 if (task->tk_status == -ETIMEDOUT && !data->res.op_status)
1182 data->res.op_status = NFS4ERR_NXIO;
1183 if (task->tk_status < 0 && data->res.op_status)
1184 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1185 data->args.offset, data->args.count,
1186 data->res.op_status, OP_COMMIT);
1187 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1188 data->lseg, data->ds_commit_index);
1189
1190 switch (err) {
1191 case -NFS4ERR_RESET_TO_PNFS:
1192 case -NFS4ERR_RESET_TO_MDS:
1193 inode = data->lseg->pls_layout->plh_inode;
1194 pnfs_error_mark_layout_for_return(inode, data->lseg);
1195 if (err == -NFS4ERR_RESET_TO_PNFS)
1196 pnfs_set_retry_layoutget(data->lseg->pls_layout);
1197 else
1198 pnfs_clear_retry_layoutget(data->lseg->pls_layout);
1199 pnfs_generic_prepare_to_resend_writes(data);
1200 return -EAGAIN;
1201 case -EAGAIN:
1202 rpc_restart_call_prepare(task);
1203 return -EAGAIN;
1204 }
1205
1206 if (data->verf.committed == NFS_UNSTABLE)
1207		pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
1208
1209 return 0;
1210}
1211
1212static int ff_layout_write_prepare_common(struct rpc_task *task,
1213 struct nfs_pgio_header *hdr)
1214{
1215	nfs4_ff_layout_stat_io_start_write(
1216 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1217 hdr->args.count);
1218
1219	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1220 rpc_exit(task, -EIO);
1221 return -EIO;
1222 }
1223
1224 if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
1225 bool retry_pnfs;
1226
1227 retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
1228 dprintk("%s task %u reset io to %s\n", __func__,
1229 task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
1230 ff_layout_reset_write(hdr, retry_pnfs);
1231 rpc_exit(task, 0);
1232 return -EAGAIN;
1233 }
1234
1235 return 0;
1236}
1237
1238static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1239{
1240 struct nfs_pgio_header *hdr = data;
1241
1242 if (ff_layout_write_prepare_common(task, hdr))
1243 return;
1244
1245 rpc_call_start(task);
1246}
1247
1248static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1249{
1250 struct nfs_pgio_header *hdr = data;
1251
1252	if (ff_layout_setup_sequence(hdr->ds_clp,
1253 &hdr->args.seq_args,
1254 &hdr->res.seq_res,
1255 task))
1256 return;
1257
1258	if (ff_layout_write_prepare_common(task, hdr))
1259 return;
1260
1261	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1262 hdr->args.lock_context, FMODE_WRITE) == -EIO)
1263 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1264}
1265
1266static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1267{
1268 struct nfs_pgio_header *hdr = data;
1269
1270	nfs4_ff_layout_stat_io_end_write(task,
1271 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1272 hdr->args.count, hdr->res.count,
1273 hdr->res.verf->committed);
1274
1275	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1276 task->tk_status == 0) {
1277 nfs4_sequence_done(task, &hdr->res.seq_res);
1278 return;
1279 }
1280
1281 /* Note this may cause RPC to be resent */
1282 hdr->mds_ops->rpc_call_done(task, hdr);
1283}
1284
1285static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1286{
1287 struct nfs_pgio_header *hdr = data;
1288
1289 rpc_count_iostats_metrics(task,
1290 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1291}
1292
1293static void ff_layout_commit_prepare_common(struct rpc_task *task,
1294 struct nfs_commit_data *cdata)
1295{
1296 nfs4_ff_layout_stat_io_start_write(
1297 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1298 0);
1299}
1300
1301static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1302{
1303	ff_layout_commit_prepare_common(task, data);
1304	rpc_call_start(task);
1305}
1306
1307static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1308{
1309 struct nfs_commit_data *wdata = data;
1310
1311	if (ff_layout_setup_sequence(wdata->ds_clp,
1312				 &wdata->args.seq_args,
1313 &wdata->res.seq_res,
1314				 task))
1315 return;
1316 ff_layout_commit_prepare_common(task, data);
1317}
1318
1319static void ff_layout_commit_done(struct rpc_task *task, void *data)
1320{
1321 struct nfs_commit_data *cdata = data;
1322 struct nfs_page *req;
1323 __u64 count = 0;
1324
1325 if (task->tk_status == 0) {
1326 list_for_each_entry(req, &cdata->pages, wb_list)
1327 count += req->wb_bytes;
1328 }
1329
1330 nfs4_ff_layout_stat_io_end_write(task,
1331 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1332 count, count, NFS_FILE_SYNC);
1333
1334 pnfs_generic_write_commit_done(task, data);
1335}
1336
1337static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1338{
1339 struct nfs_commit_data *cdata = data;
1340
1341 rpc_count_iostats_metrics(task,
1342 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1343}
1344
1345static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1346 .rpc_call_prepare = ff_layout_read_prepare_v3,
1347 .rpc_call_done = ff_layout_read_call_done,
1348 .rpc_count_stats = ff_layout_read_count_stats,
1349 .rpc_release = pnfs_generic_rw_release,
1350};
1351
1352static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1353 .rpc_call_prepare = ff_layout_read_prepare_v4,
1354 .rpc_call_done = ff_layout_read_call_done,
1355 .rpc_count_stats = ff_layout_read_count_stats,
1356 .rpc_release = pnfs_generic_rw_release,
1357};
1358
1359static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1360 .rpc_call_prepare = ff_layout_write_prepare_v3,
1361 .rpc_call_done = ff_layout_write_call_done,
1362 .rpc_count_stats = ff_layout_write_count_stats,
1363 .rpc_release = pnfs_generic_rw_release,
1364};
1365
1366static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1367 .rpc_call_prepare = ff_layout_write_prepare_v4,
1368 .rpc_call_done = ff_layout_write_call_done,
1369 .rpc_count_stats = ff_layout_write_count_stats,
1370 .rpc_release = pnfs_generic_rw_release,
1371};
1372
1373static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1374 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1375	.rpc_call_done = ff_layout_commit_done,
1376	.rpc_count_stats = ff_layout_commit_count_stats,
1377 .rpc_release = pnfs_generic_commit_release,
1378};
1379
1380static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1381 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1382	.rpc_call_done = ff_layout_commit_done,
1383	.rpc_count_stats = ff_layout_commit_count_stats,
1384 .rpc_release = pnfs_generic_commit_release,
1385};
1386
1387static enum pnfs_try_status
1388ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1389{
1390 struct pnfs_layout_segment *lseg = hdr->lseg;
1391 struct nfs4_pnfs_ds *ds;
1392 struct rpc_clnt *ds_clnt;
1393 struct rpc_cred *ds_cred;
1394 loff_t offset = hdr->args.offset;
1395 u32 idx = hdr->pgio_mirror_idx;
1396 int vers;
1397 struct nfs_fh *fh;
1398
1399 dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
1400 __func__, hdr->inode->i_ino,
1401 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1402
1403 ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
1404 if (!ds)
1405 goto out_failed;
1406
1407 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1408 hdr->inode);
1409 if (IS_ERR(ds_clnt))
1410 goto out_failed;
1411
1412 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1413 if (IS_ERR(ds_cred))
1414 goto out_failed;
1415
1416 vers = nfs4_ff_layout_ds_version(lseg, idx);
1417
1418 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1419 ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
1420
1421 atomic_inc(&ds->ds_clp->cl_count);
1422 hdr->ds_clp = ds->ds_clp;
1423 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1424 if (fh)
1425 hdr->args.fh = fh;
1426	/*
1427 * Note that if we ever decide to split across DSes,
1428 * then we may need to handle dense-like offsets.
1429 */
1430 hdr->args.offset = offset;
1431 hdr->mds_offset = offset;
1432
1433 /* Perform an asynchronous read to ds */
1434 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1435 vers == 3 ? &ff_layout_read_call_ops_v3 :
1436 &ff_layout_read_call_ops_v4,
1437 0, RPC_TASK_SOFTCONN);
1438
1439 return PNFS_ATTEMPTED;
1440
1441out_failed:
1442 if (ff_layout_has_available_ds(lseg))
1443 return PNFS_TRY_AGAIN;
1444 return PNFS_NOT_ATTEMPTED;
1445}
1446
1447/* Perform async writes. */
1448static enum pnfs_try_status
1449ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1450{
1451 struct pnfs_layout_segment *lseg = hdr->lseg;
1452 struct nfs4_pnfs_ds *ds;
1453 struct rpc_clnt *ds_clnt;
1454 struct rpc_cred *ds_cred;
1455 loff_t offset = hdr->args.offset;
1456 int vers;
1457 struct nfs_fh *fh;
1458 int idx = hdr->pgio_mirror_idx;
1459
1460 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1461 if (!ds)
1462 return PNFS_NOT_ATTEMPTED;
1463
1464 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1465 hdr->inode);
1466 if (IS_ERR(ds_clnt))
1467 return PNFS_NOT_ATTEMPTED;
1468
1469 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1470 if (IS_ERR(ds_cred))
1471 return PNFS_NOT_ATTEMPTED;
1472
1473 vers = nfs4_ff_layout_ds_version(lseg, idx);
1474
1475 dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
1476 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1477 offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
1478 vers);
1479
1480 hdr->pgio_done_cb = ff_layout_write_done_cb;
1481 atomic_inc(&ds->ds_clp->cl_count);
1482 hdr->ds_clp = ds->ds_clp;
1483 hdr->ds_commit_idx = idx;
1484 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1485 if (fh)
1486 hdr->args.fh = fh;
1487
1488 /*
1489 * Note that if we ever decide to split across DSes,
1490 * then we may need to handle dense-like offsets.
1491 */
1492 hdr->args.offset = offset;
1493
1494 /* Perform an asynchronous write */
1495 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1496 vers == 3 ? &ff_layout_write_call_ops_v3 :
1497 &ff_layout_write_call_ops_v4,
1498 sync, RPC_TASK_SOFTCONN);
1499 return PNFS_ATTEMPTED;
1500}
1501
1502static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1503{
1504 return i;
1505}
1506
1507static struct nfs_fh *
1508select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1509{
1510 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1511
1512 /* FIXME: Assume that there is only one NFS version available
1513 * for the DS.
1514 */
1515 return &flseg->mirror_array[i]->fh_versions[0];
1516}
1517
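/*
 * Send a COMMIT to the data server selected by ds_commit_index, using
 * the DS credential and the v3 or v4 call ops. If the DS cannot be
 * reached, prepare the writes for resending through the MDS.
 */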
1518static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1519{
1520 struct pnfs_layout_segment *lseg = data->lseg;
1521 struct nfs4_pnfs_ds *ds;
1522 struct rpc_clnt *ds_clnt;
1523 struct rpc_cred *ds_cred;
1524 u32 idx;
1525 int vers;
1526 struct nfs_fh *fh;
1527
1528 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1529 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1530 if (!ds)
1531 goto out_err;
1532
1533 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1534 data->inode);
1535 if (IS_ERR(ds_clnt))
1536 goto out_err;
1537
1538 ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
1539 if (IS_ERR(ds_cred))
1540 goto out_err;
1541
1542 vers = nfs4_ff_layout_ds_version(lseg, idx);
1543
1544 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1545 data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
1546 vers);
1547 data->commit_done_cb = ff_layout_commit_done_cb;
1548 data->cred = ds_cred;
1549 atomic_inc(&ds->ds_clp->cl_count);
1550 data->ds_clp = ds->ds_clp;
1551 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1552 if (fh)
1553 data->args.fh = fh;
1554
1555	return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1556 vers == 3 ? &ff_layout_commit_call_ops_v3 :
1557 &ff_layout_commit_call_ops_v4,
1558 how, RPC_TASK_SOFTCONN);
1559out_err:
1560 pnfs_generic_prepare_to_resend_writes(data);
1561 pnfs_generic_commit_release(data);
1562 return -EAGAIN;
1563}
1564
1565static int
1566ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1567 int how, struct nfs_commit_info *cinfo)
1568{
1569 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1570 ff_layout_initiate_commit);
1571}
1572
1573static struct pnfs_ds_commit_info *
1574ff_layout_get_ds_info(struct inode *inode)
1575{
1576 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1577
1578 if (layout == NULL)
1579 return NULL;
1580
1581 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1582}
1583
1584static void
1585ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1586{
1587 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1588 id_node));
1589}
1590
1591static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
1592 struct xdr_stream *xdr,
1593 const struct nfs4_layoutreturn_args *args)
1594{
1595 struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
1596 __be32 *start;
1597 int count = 0, ret = 0;
1598
1599 start = xdr_reserve_space(xdr, 4);
1600 if (unlikely(!start))
1601 return -E2BIG;
1602
1603	/* This assumes we always return _ALL_ layouts */
1604 spin_lock(&hdr->plh_inode->i_lock);
1605 ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
1606 spin_unlock(&hdr->plh_inode->i_lock);
1607
1608 *start = cpu_to_be32(count);
1609
1610 return ret;
1611}
1612
1613/* report nothing for now */
1614static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
1615 struct xdr_stream *xdr,
1616 const struct nfs4_layoutreturn_args *args)
1617{
1618 __be32 *p;
1619
1620 p = xdr_reserve_space(xdr, 4);
1621 if (likely(p))
1622 *p = cpu_to_be32(0);
1623}
1624
1625static struct nfs4_deviceid_node *
1626ff_layout_alloc_deviceid_node(struct nfs_server *server,
1627 struct pnfs_device *pdev, gfp_t gfp_flags)
1628{
1629 struct nfs4_ff_layout_ds *dsaddr;
1630
1631 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
1632 if (!dsaddr)
1633 return NULL;
1634 return &dsaddr->id_node;
1635}
1636
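/*
 * Encode the flexfiles layoutreturn payload: reserve 4 bytes for its
 * length, emit the I/O error array and the (currently empty) iostats
 * array, then backfill the length once the total size is known.
 */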
1637static void
1638ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
1639 struct xdr_stream *xdr,
1640 const struct nfs4_layoutreturn_args *args)
1641{
1642 struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
1643 __be32 *start;
1644
1645 dprintk("%s: Begin\n", __func__);
1646 start = xdr_reserve_space(xdr, 4);
1647 BUG_ON(!start);
1648
1649 if (ff_layout_encode_ioerr(flo, xdr, args))
1650 goto out;
1651
1652 ff_layout_encode_iostats(flo, xdr, args);
1653out:
1654 *start = cpu_to_be32((xdr->p - start - 1) * 4);
1655 dprintk("%s: Return\n", __func__);
1656}
1657
1658static struct pnfs_layoutdriver_type flexfilelayout_type = {
1659 .id = LAYOUT_FLEX_FILES,
1660 .name = "LAYOUT_FLEX_FILES",
1661 .owner = THIS_MODULE,
1662 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
1663 .free_layout_hdr = ff_layout_free_layout_hdr,
1664 .alloc_lseg = ff_layout_alloc_lseg,
1665 .free_lseg = ff_layout_free_lseg,
1666 .pg_read_ops = &ff_layout_pg_read_ops,
1667 .pg_write_ops = &ff_layout_pg_write_ops,
1668 .get_ds_info = ff_layout_get_ds_info,
1669	.free_deviceid_node    = ff_layout_free_deviceid_node,
1670	.mark_request_commit    = pnfs_layout_mark_request_commit,
1671	.clear_request_commit   = pnfs_generic_clear_request_commit,
1672 .scan_commit_lists = pnfs_generic_scan_commit_lists,
1673 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
1674 .commit_pagelist = ff_layout_commit_pagelist,
1675 .read_pagelist = ff_layout_read_pagelist,
1676 .write_pagelist = ff_layout_write_pagelist,
1677 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
1678 .encode_layoutreturn = ff_layout_encode_layoutreturn,
1679	.sync			= pnfs_nfs_generic_sync,
1680};
1681
1682static int __init nfs4flexfilelayout_init(void)
1683{
1684 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
1685 __func__);
1686 return pnfs_register_layoutdriver(&flexfilelayout_type);
1687}
1688
1689static void __exit nfs4flexfilelayout_exit(void)
1690{
1691 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
1692 __func__);
1693 pnfs_unregister_layoutdriver(&flexfilelayout_type);
1694}
1695
1696MODULE_ALIAS("nfs-layouttype4-4");
1697
1698MODULE_LICENSE("GPL");
1699MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
1700
1701module_init(nfs4flexfilelayout_init);
1702module_exit(nfs4flexfilelayout_exit);