// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Oracle. All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <rdma/rw.h>

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY		RPCDBG_SVCXPRT

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
        struct list_head        rw_list;
        struct rdma_rw_ctx      rw_ctx;
        int                     rw_nents;
        struct sg_table         rw_sg_table;
        struct scatterlist      rw_first_sgl[0];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
        return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
                                        rw_list);
}

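/* Pop a cached R/W context from the transport's free list, or allocate
 * a fresh one (with an inline scatterlist of SG_CHUNK_SIZE entries)
 * when the list is empty. The chained sg_table is sized for @sges
 * entries. Returns NULL if allocation or sg_table setup fails.
 */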
static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
        struct svc_rdma_rw_ctxt *ctxt;

        spin_lock(&rdma->sc_rw_ctxt_lock);

        ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
        if (ctxt) {
                list_del(&ctxt->rw_list);
                spin_unlock(&rdma->sc_rw_ctxt_lock);
        } else {
                spin_unlock(&rdma->sc_rw_ctxt_lock);
                ctxt = kmalloc(sizeof(*ctxt) +
                               SG_CHUNK_SIZE * sizeof(struct scatterlist),
                               GFP_KERNEL);
                if (!ctxt)
                        goto out;
                INIT_LIST_HEAD(&ctxt->rw_list);
        }

        ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
        if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
                                   ctxt->rw_sg_table.sgl)) {
                kfree(ctxt);
                ctxt = NULL;
        }
out:
        return ctxt;
}

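/* Release a context's scatterlist and return the context to the
 * transport's free list for reuse.
 */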
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
                                 struct svc_rdma_rw_ctxt *ctxt)
{
        sg_free_table_chained(&ctxt->rw_sg_table, true);

        spin_lock(&rdma->sc_rw_ctxt_lock);
        list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
        spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_rw_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
                list_del(&ctxt->rw_list);
                kfree(ctxt);
        }
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
        struct ib_cqe           cc_cqe;
        struct svcxprt_rdma     *cc_rdma;
        struct list_head        cc_rwctxts;
        int                     cc_sqecount;
};

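/* Initialize a chunk context and take a reference on the transport;
 * the reference is dropped in svc_rdma_cc_release().
 */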
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
                             struct svc_rdma_chunk_ctxt *cc)
{
        cc->cc_rdma = rdma;
        svc_xprt_get(&rdma->sc_xprt);

        INIT_LIST_HEAD(&cc->cc_rwctxts);
        cc->cc_sqecount = 0;
}

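/* Unmap and release all R/W contexts attached to a chunk context,
 * then drop the transport reference taken in svc_rdma_cc_init().
 */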
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
                                enum dma_data_direction dir)
{
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_rw_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
                list_del(&ctxt->rw_list);

                rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
                                    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
                                    ctxt->rw_nents, dir);
                svc_rdma_put_rw_ctxt(rdma, ctxt);
        }
        svc_xprt_put(&rdma->sc_xprt);
}

/* State for sending a Write or Reply chunk.
 * - Tracks progress of writing one chunk over all its segments
 * - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
        /* write state of this chunk */
        unsigned int            wi_seg_off;
        unsigned int            wi_seg_no;
        unsigned int            wi_nsegs;
        __be32                  *wi_segs;

        /* SGL constructor arguments */
        struct xdr_buf          *wi_xdr;
        unsigned char           *wi_base;
        unsigned int            wi_next_off;

        struct svc_rdma_chunk_ctxt      wi_cc;
};

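/* Allocate and initialize state for sending one Write or Reply chunk.
 * @chunk points to the transport header word just before the chunk's
 * segment count; the count and the segment array that follow it are
 * saved so the chunk can be walked one segment at a time.
 */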
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
        struct svc_rdma_write_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return info;

        info->wi_seg_off = 0;
        info->wi_seg_no = 0;
        info->wi_nsegs = be32_to_cpup(++chunk);
        info->wi_segs = ++chunk;
        svc_rdma_cc_init(rdma, &info->wi_cc);
        info->wi_cc.cc_cqe.done = svc_rdma_write_done;
        return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
        svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
        kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_write_info *info =
                        container_of(cc, struct svc_rdma_write_info, wi_cc);

        trace_svcrdma_wc_write(wc);

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
                               ib_wc_status_msg(wc->status),
                               wc->status, wc->vendor_err);
        }

        svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
        struct svc_rdma_op_ctxt         *ri_readctxt;
        unsigned int                    ri_position;
        unsigned int                    ri_pageno;
        unsigned int                    ri_pageoff;
        unsigned int                    ri_chunklen;

        struct svc_rdma_chunk_ctxt      ri_cc;
};

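/* Allocate state for pulling one Read chunk from the client. */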
static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_read_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return info;

        svc_rdma_cc_init(rdma, &info->ri_cc);
        info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
        return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
        svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
        kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_read_info *info =
                        container_of(cc, struct svc_rdma_read_info, ri_cc);

        trace_svcrdma_wc_read(wc);

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
                               ib_wc_status_msg(wc->status),
                               wc->status, wc->vendor_err);
                svc_rdma_put_context(info->ri_readctxt, 1);
        } else {
                spin_lock(&rdma->sc_rq_dto_lock);
                list_add_tail(&info->ri_readctxt->list,
                              &rdma->sc_read_complete_q);
                spin_unlock(&rdma->sc_rq_dto_lock);

                set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
                svc_xprt_enqueue(&rdma->sc_xprt);
        }

        svc_rdma_read_info_free(info);
}

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_xprt *xprt = &rdma->sc_xprt;
        struct ib_send_wr *first_wr, *bad_wr;
        struct list_head *tmp;
        struct ib_cqe *cqe;
        int ret;

        if (cc->cc_sqecount > rdma->sc_sq_depth)
                return -EINVAL;

        first_wr = NULL;
        cqe = &cc->cc_cqe;
        list_for_each(tmp, &cc->cc_rwctxts) {
                struct svc_rdma_rw_ctxt *ctxt;

                ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
                first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
                                           rdma->sc_port_num, cqe, first_wr);
                cqe = NULL;
        }

        do {
                if (atomic_sub_return(cc->cc_sqecount,
                                      &rdma->sc_sq_avail) > 0) {
                        ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
                        trace_svcrdma_post_rw(&cc->cc_cqe,
                                              cc->cc_sqecount, ret);
                        if (ret)
                                break;
                        return 0;
                }

                trace_svcrdma_sq_full(rdma);
                atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
                wait_event(rdma->sc_send_wait,
                           atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
                trace_svcrdma_sq_retry(rdma);
        } while (1);

        set_bit(XPT_CLOSE, &xprt->xpt_flags);

        /* If even one was posted, there will be a completion. */
        if (bad_wr != first_wr)
                return 0;

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
        return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
                               unsigned int len,
                               struct svc_rdma_rw_ctxt *ctxt)
{
        struct scatterlist *sg = ctxt->rw_sg_table.sgl;

        sg_set_buf(&sg[0], info->wi_base, len);
        info->wi_base += len;

        ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
                                    unsigned int remaining,
                                    struct svc_rdma_rw_ctxt *ctxt)
{
        unsigned int sge_no, sge_bytes, page_off, page_no;
        struct xdr_buf *xdr = info->wi_xdr;
        struct scatterlist *sg;
        struct page **page;

        page_off = info->wi_next_off + xdr->page_base;
        page_no = page_off >> PAGE_SHIFT;
        page_off = offset_in_page(page_off);
        page = xdr->pages + page_no;
        info->wi_next_off += remaining;
        sg = ctxt->rw_sg_table.sgl;
        sge_no = 0;
        do {
                sge_bytes = min_t(unsigned int, remaining,
                                  PAGE_SIZE - page_off);
                sg_set_page(sg, *page, sge_bytes, page_off);

                remaining -= sge_bytes;
                sg = sg_next(sg);
                page_off = 0;
                sge_no++;
                page++;
        } while (remaining);

        ctxt->rw_nents = sge_no;
}

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
                      void (*constructor)(struct svc_rdma_write_info *info,
                                          unsigned int len,
                                          struct svc_rdma_rw_ctxt *ctxt),
                      unsigned int remaining)
{
        struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_rw_ctxt *ctxt;
        __be32 *seg;
        int ret;

        seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
        do {
                unsigned int write_len;
                u32 seg_length, seg_handle;
                u64 seg_offset;

                if (info->wi_seg_no >= info->wi_nsegs)
                        goto out_overflow;

                seg_handle = be32_to_cpup(seg);
                seg_length = be32_to_cpup(seg + 1);
                xdr_decode_hyper(seg + 2, &seg_offset);
                seg_offset += info->wi_seg_off;

                write_len = min(remaining, seg_length - info->wi_seg_off);
                ctxt = svc_rdma_get_rw_ctxt(rdma,
                                            (write_len >> PAGE_SHIFT) + 2);
                if (!ctxt)
                        goto out_noctx;

                constructor(info, write_len, ctxt);
                ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
                                       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
                                       ctxt->rw_nents, 0, seg_offset,
                                       seg_handle, DMA_TO_DEVICE);
                if (ret < 0)
                        goto out_initerr;

                trace_svcrdma_encode_wseg(seg_handle, write_len, seg_offset);
                list_add(&ctxt->rw_list, &cc->cc_rwctxts);
                cc->cc_sqecount += ret;
                if (write_len == seg_length - info->wi_seg_off) {
                        seg += 4;
                        info->wi_seg_no++;
                        info->wi_seg_off = 0;
                } else {
                        info->wi_seg_off += write_len;
                }
                remaining -= write_len;
        } while (remaining);

        return 0;

out_overflow:
        dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
                info->wi_nsegs);
        return -E2BIG;

out_noctx:
        dprintk("svcrdma: no R/W ctxs available\n");
        return -ENOMEM;

out_initerr:
        svc_rdma_put_rw_ctxt(rdma, ctxt);
        trace_svcrdma_dma_map_rwctx(rdma, ret);
        return -EIO;
}

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
                                  struct kvec *vec)
{
        info->wi_base = vec->iov_base;
        return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
                                     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is
 * just the page list. A Reply chunk is the head, page list,
 * and tail. This function is shared between the two types
 * of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
                                      struct xdr_buf *xdr)
{
        info->wi_xdr = xdr;
        info->wi_next_off = 0;
        return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
                                     xdr->page_len);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
                              struct xdr_buf *xdr)
{
        struct svc_rdma_write_info *info;
        int ret;

        if (!xdr->page_len)
                return 0;

        info = svc_rdma_write_info_alloc(rdma, wr_ch);
        if (!info)
                return -ENOMEM;

        ret = svc_rdma_send_xdr_pagelist(info, xdr);
        if (ret < 0)
                goto out_err;

        ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
        if (ret < 0)
                goto out_err;

        trace_svcrdma_encode_write(xdr->page_len);
        return xdr->page_len;

out_err:
        svc_rdma_write_info_free(info);
        return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rp_ch: Reply chunk provided by client
 * @writelist: true if client provided a Write list
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
                              bool writelist, struct xdr_buf *xdr)
{
        struct svc_rdma_write_info *info;
        int consumed, ret;

        info = svc_rdma_write_info_alloc(rdma, rp_ch);
        if (!info)
                return -ENOMEM;

        ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
        if (ret < 0)
                goto out_err;
        consumed = xdr->head[0].iov_len;

        /* Send the page list in the Reply chunk only if the
         * client did not provide Write chunks.
         */
        if (!writelist && xdr->page_len) {
                ret = svc_rdma_send_xdr_pagelist(info, xdr);
                if (ret < 0)
                        goto out_err;
                consumed += xdr->page_len;
        }

        if (xdr->tail[0].iov_len) {
                ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
                if (ret < 0)
                        goto out_err;
                consumed += xdr->tail[0].iov_len;
        }

        ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
        if (ret < 0)
                goto out_err;

        trace_svcrdma_encode_reply(consumed);
        return consumed;

out_err:
        svc_rdma_write_info_free(info);
        return ret;
}

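/* Set up one RDMA Read segment: map sink pages from rqstp->rq_pages
 * into a scatterlist, initialize an rdma_rw context that targets the
 * segment's R_key, and queue it on the chunk context. Returns zero on
 * success, or a negative errno.
 */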
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
                                       struct svc_rqst *rqstp,
                                       u32 rkey, u32 len, u64 offset)
{
        struct svc_rdma_op_ctxt *head = info->ri_readctxt;
        struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
        struct svc_rdma_rw_ctxt *ctxt;
        unsigned int sge_no, seg_len;
        struct scatterlist *sg;
        int ret;

        sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
        ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
        if (!ctxt)
                goto out_noctx;
        ctxt->rw_nents = sge_no;

        sg = ctxt->rw_sg_table.sgl;
        for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
                seg_len = min_t(unsigned int, len,
                                PAGE_SIZE - info->ri_pageoff);

                head->arg.pages[info->ri_pageno] =
                        rqstp->rq_pages[info->ri_pageno];
                if (!info->ri_pageoff)
                        head->count++;

                sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
                            seg_len, info->ri_pageoff);
                sg = sg_next(sg);

                info->ri_pageoff += seg_len;
                if (info->ri_pageoff == PAGE_SIZE) {
                        info->ri_pageno++;
                        info->ri_pageoff = 0;
                }
                len -= seg_len;

                /* Safety check */
                if (len &&
                    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
                        goto out_overrun;
        }

        ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
                               cc->cc_rdma->sc_port_num,
                               ctxt->rw_sg_table.sgl, ctxt->rw_nents,
                               0, offset, rkey, DMA_FROM_DEVICE);
        if (ret < 0)
                goto out_initerr;

        list_add(&ctxt->rw_list, &cc->cc_rwctxts);
        cc->cc_sqecount += ret;
        return 0;

out_noctx:
        dprintk("svcrdma: no R/W ctxs available\n");
        return -ENOMEM;

out_overrun:
        dprintk("svcrdma: request overruns rq_pages\n");
        return -EINVAL;

out_initerr:
        trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret);
        svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
        return -EIO;
}

/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                                     struct svc_rdma_read_info *info,
                                     __be32 *p)
{
        int ret;

        ret = -EINVAL;
        info->ri_chunklen = 0;
        while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
                u32 rs_handle, rs_length;
                u64 rs_offset;

                rs_handle = be32_to_cpup(p++);
                rs_length = be32_to_cpup(p++);
                p = xdr_decode_hyper(p, &rs_offset);

                ret = svc_rdma_build_read_segment(info, rqstp,
                                                  rs_handle, rs_length,
                                                  rs_offset);
                if (ret < 0)
                        break;

                trace_svcrdma_encode_rseg(rs_handle, rs_length, rs_offset);
                info->ri_chunklen += rs_length;
        }

        return ret;
}

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->arg.pages.
 *
 * Currently NFSD does not look at the head->arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
                                            struct svc_rdma_read_info *info,
                                            __be32 *p)
{
        struct svc_rdma_op_ctxt *head = info->ri_readctxt;
        int ret;

        info->ri_pageno = head->hdr_count;
        info->ri_pageoff = 0;

        ret = svc_rdma_build_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out;

        trace_svcrdma_encode_read(info->ri_chunklen, info->ri_position);

        /* Split the Receive buffer between the head and tail
         * buffers at Read chunk's position. XDR roundup of the
         * chunk is not included in either the pagelist or in
         * the tail.
         */
        head->arg.tail[0].iov_base =
                head->arg.head[0].iov_base + info->ri_position;
        head->arg.tail[0].iov_len =
                head->arg.head[0].iov_len - info->ri_position;
        head->arg.head[0].iov_len = info->ri_position;

        /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
         *
         * If the client already rounded up the chunk length, the
         * length does not change. Otherwise, the length of the page
         * list is increased to include XDR round-up.
         *
         * Currently these chunks always start at page offset 0,
         * thus the rounded-up length never crosses a page boundary.
         */
        info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

        head->arg.page_len = info->ri_chunklen;
        head->arg.len += info->ri_chunklen;
        head->arg.buflen += info->ri_chunklen;

out:
        return ret;
}

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->arg.pages.
 *
 * Assumptions:
 * - A PZRC has an XDR-aligned length (no implicit round-up).
 * - There can be no trailing inline content (IOW, we assume
 *   a PZRC is never sent in an RDMA_MSG message, though it's
 *   allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
                                        struct svc_rdma_read_info *info,
                                        __be32 *p)
{
        struct svc_rdma_op_ctxt *head = info->ri_readctxt;
        int ret;

        info->ri_pageno = head->hdr_count - 1;
        info->ri_pageoff = offset_in_page(head->byte_len);

        ret = svc_rdma_build_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out;

        trace_svcrdma_encode_pzr(info->ri_chunklen);

        head->arg.len += info->ri_chunklen;
        head->arg.buflen += info->ri_chunklen;

        if (head->arg.buflen <= head->sge[0].length) {
                /* Transport header and RPC message fit entirely
                 * in page where head iovec resides.
                 */
                head->arg.head[0].iov_len = info->ri_chunklen;
        } else {
                /* Transport header and part of RPC message reside
                 * in the head iovec's page.
                 */
                head->arg.head[0].iov_len =
                        head->sge[0].length - head->byte_len;
                head->arg.page_len =
                        info->ri_chunklen - head->arg.head[0].iov_len;
        }

out:
        return ret;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 * - All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
                             struct svc_rdma_op_ctxt *head, __be32 *p)
{
        struct svc_rdma_read_info *info;
        struct page **page;
        int ret;

        /* The request (with page list) is constructed in
         * head->arg. Pages involved with RDMA Read I/O are
         * transferred there.
         */
        head->hdr_count = head->count;
        head->arg.head[0] = rqstp->rq_arg.head[0];
        head->arg.tail[0] = rqstp->rq_arg.tail[0];
        head->arg.pages = head->pages;
        head->arg.page_base = 0;
        head->arg.page_len = 0;
        head->arg.len = rqstp->rq_arg.len;
        head->arg.buflen = rqstp->rq_arg.buflen;

        info = svc_rdma_read_info_alloc(rdma);
        if (!info)
                return -ENOMEM;
        info->ri_readctxt = head;

        info->ri_position = be32_to_cpup(p + 1);
        if (info->ri_position)
                ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
        else
                ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);

        /* Mark the start of the pages that can be used for the reply */
        if (info->ri_pageoff > 0)
                info->ri_pageno++;
        rqstp->rq_respages = &rqstp->rq_pages[info->ri_pageno];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        if (ret < 0)
                goto out;

        ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);

out:
        /* Read sink pages have been moved from rqstp->rq_pages to
         * head->arg.pages. Force svc_recv to refill those slots
         * in rq_pages.
         */
        for (page = rqstp->rq_pages; page < rqstp->rq_respages; page++)
                *page = NULL;

        if (ret < 0)
                svc_rdma_read_info_free(info);
        return ret;
}