/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static const char transfertypes[][12] = {
	"inline",	/* no chunks */
	"read list",	/* some argument via rdma read */
	"*read list",	/* entire request via rdma read */
	"write list",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += maxsegs * sizeof(struct rpcrdma_read_chunk);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += sizeof(__be32);		/* segment count */
	size += maxsegs * sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = 0;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split "vec" on page boundaries into segments. FMR registers pages,
 * not a byte range. Other modes coalesce these segments into a single
 * MR when they can.
 */
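/* Example (illustrative, assuming 4 KiB pages): a 6000-byte kvec whose
 * iov_base begins 100 bytes into a page is converted into two segments
 * of 3996 and 2004 bytes, each ending on a page boundary.
 */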
static int
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
{
	size_t page_offset;
	u32 remaining;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining && n < RPCRDMA_MAX_SEGS) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg[n].mr_len;
		base += seg[n].mr_len;
		++n;
		page_offset = 0;
	}
	return n;
}

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	int len, n, p, page_base;
	struct page **ppages;

	n = 0;
	if (pos == 0) {
		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	p = 0;
	while (len && n < RPCRDMA_MAX_SEGS) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -EAGAIN;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			goto out_overflow;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == RPCRDMA_MAX_SEGS)
		goto out_overflow;

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		return n;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	return n;

out_overflow:
	pr_err("rpcrdma: segment array overflow\n");
	return -EIO;
}

static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
	*iptr++ = cpu_to_be32(mw->mw_handle);
	*iptr++ = cpu_to_be32(mw->mw_length);
	xdr_encode_hyper(iptr, mw->mw_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mw);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mw);
	return 0;
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int pos;
	int n, nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 false, &mw);
		if (n < 0)
			return n;
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_read_segment(xdr, mw, pos) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__, pos,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.read_chunk_count++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	return 0;
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return n;
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_rdma_segment(xdr, mw) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return n;
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_rdma_segment(xdr, mw) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/* Prepare the RPC-over-RDMA header SGE.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &req->rl_send_sge[0];

	if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
		if (!__rpcrdma_dma_map_regbuf(ia, rb))
			return false;
		sge->addr = rdmab_addr(rb);
		sge->lkey = rdmab_lkey(rb);
	}
	sge->length = len;

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	req->rl_send_wr.num_sge++;
	return true;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge = req->rl_send_sge;
	u32 lkey = ia->ri_pd->local_dma_lkey;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		return false;
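	/* sge[0] already carries the transport header, set up by
	 * rpcrdma_prepare_hdr_sge(); the message payload SGEs built
	 * here therefore start at index 1.
	 */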
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
							   page_base, len,
							   DMA_TO_DEVICE);
			if (ib_dma_mapping_error(device, sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = lkey;

			req->rl_mapped_sges++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr = ib_dma_map_page(device, page,
						   page_base, len,
						   DMA_TO_DEVICE);
		if (ib_dma_mapping_error(device, sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = lkey;
		req->rl_mapped_sges++;
	}

out:
	req->rl_send_wr.num_sge = sge_no + 1;
	return true;

out_mapping_overflow:
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	pr_err("rpcrdma: Send mapping error\n");
	return false;
}

bool
rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			  u32 hdrlen, struct xdr_buf *xdr,
			  enum rpcrdma_chunktype rtype)
{
	req->rl_send_wr.num_sge = 0;
	req->rl_mapped_sges = 0;

	if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
		goto out_map;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
			goto out_map;

	return true;

out_map:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

void
rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge;
	int count;

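	/* sge[0] (transport header) and sge[1] (head iovec) are backed
	 * by persistently DMA-mapped regbufs; only the page and tail
	 * SGEs, starting at index 2, were mapped per-Send and need to
	 * be unmapped here.
	 */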
	sge = &req->rl_send_sge[2];
	for (count = req->rl_mapped_sges; count--; sge++)
		ib_dma_unmap_page(device, sge->addr, sge->length,
				  DMA_TO_DEVICE);
	req->rl_mapped_sges = 0;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if not enough pages are available for on-demand reply buffer,
 *	%-ENOBUFS if no MRs are available to register chunks,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

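	/* Illustrative layout of the transport header constructed below
	 * for an inline request (RDMA_MSG with no chunks):
	 *
	 *   xid | version | credits | rdma_msg | 0 | 0 | 0 | RPC call msg
	 *
	 * The three zeroes are the empty Read list, Write list, and
	 * Reply chunk discriminators.
	 */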
	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	req->rl_xid = rqst->rq_xid;
	rpcrdma_insert_req(&r_xprt->rx_buf, req);

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	dprintk("RPC: %5u %s: %s/%s: hdrlen %u rpclen\n",
		rqst->rq_task->tk_pid, __func__,
		transfertypes[rtype], transfertypes[wtype],
		xdr_stream_pos(xdr));

	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req,
				       xdr_stream_pos(xdr),
				       &rqst->rq_snd_buf, rtype)) {
		ret = -EIO;
		goto out_err;
	}
	return 0;

out_err:
	if (ret != -ENOBUFS) {
		pr_err("rpcrdma: header marshaling failed (%d)\n", ret);
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

/* Caller must guarantee @rep remains stable during this call.
 */
static void
rpcrdma_mark_remote_invalidation(struct list_head *mws,
				 struct rpcrdma_rep *rep)
{
	struct rpcrdma_mw *mw;

	if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
		return;

	list_for_each_entry(mw, mws, mw_list)
		if (mw->mw_handle == rep->rr_inv_rkey) {
			mw->mw_flags = RPCRDMA_MW_F_RI;
			break; /* only one invalidated MR per RPC */
		}
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		 __be32 xid, __be32 proc)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		xprt_disconnect_done(&r_xprt->rx_xprt);
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	ifdebug(FACILITY) {
		u64 offset;
		u32 handle;

		handle = be32_to_cpup(p++);
		*length = be32_to_cpup(p++);
		xdr_decode_hyper(p, &offset);
		dprintk("RPC: %s: segment %u@0x%016llx:0x%08x\n",
			__func__, *length, (unsigned long long)offset,
			handle);
	} else {
		*length = be32_to_cpup(p + 1);
	}

	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	dprintk("RPC: %s: segcount=%u, %u bytes\n",
		__func__, be32_to_cpup(p), *length);
	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
			rqst->rq_task->tk_pid, __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)));
		break;
	case err_chunk:
		dprintk("RPC: %5u: %s: server reports header decoding error\n",
			rqst->rq_task->tk_pid, __func__);
		break;
	default:
		dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
			rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct xdr_stream *xdr = &rep->rr_stream;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	__be32 *p, xid, vers, proc;
	unsigned long cwnd;
	struct list_head mws;
	int status;

	dprintk("RPC: %s: incoming rep %p\n", __func__, rep);

	if (rep->rr_hdrbuf.head[0].iov_len == 0)
		goto out_badstatus;

	xdr_init_decode(xdr, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base);

	/* Fixed transport header fields */
	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	xid = *p++;
	vers = *p++;
	p++;	/* credits */
	proc = *p++;

	if (rpcrdma_is_bcall(r_xprt, rep, xid, proc))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&buf->rb_lock);
	req = rpcrdma_lookup_req_locked(&r_xprt->rx_buf, xid);
	if (!req)
		goto out_nomatch;
	if (req->rl_reply)
		goto out_duplicate;

	list_replace_init(&req->rl_registered, &mws);
	rpcrdma_mark_remote_invalidation(&mws, rep);

	/* Avoid races with signals and duplicate replies
	 * by marking this req as matched.
	 */
	req->rl_reply = rep;
	spin_unlock(&buf->rb_lock);

	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
		__func__, rep, req, be32_to_cpu(xid));

	/* Invalidate and unmap the data payloads before waking the
	 * waiting application. This guarantees the memory regions
	 * are properly fenced from the server before the application
	 * accesses the data. It also ensures proper send flow control:
	 * waking the next RPC waits until this RPC has relinquished
	 * all its Send Queue entries.
	 */
	if (!list_empty(&mws))
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &mws);

	/* Perform XID lookup, reconstruction of the RPC reply, and
	 * RPC completion while holding the transport lock to ensure
	 * the rep, rqst, and rq_task pointers remain stable.
	 */
	spin_lock_bh(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, xid);
	if (!rqst)
		goto out_norqst;
	xprt->reestablish_timeout = 0;
	if (vers != rpcrdma_version)
		goto out_badversion;

	switch (proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	cwnd = xprt->cwnd;
	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badversion:
	dprintk("RPC: %s: invalid version %d\n",
		__func__, be32_to_cpu(vers));
	status = -EIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

out_badheader:
	dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
		rqst->rq_task->tk_pid, __func__, be32_to_cpu(proc));
	r_xprt->rx_stats.bad_reply_count++;
	status = -EIO;
	goto out;

/* The req was still available, but by the time the transport_lock
 * was acquired, the rqst and task had been released. Thus the RPC
 * has already been terminated.
 */
out_norqst:
	spin_unlock_bh(&xprt->transport_lock);
	rpcrdma_buffer_put(req);
	dprintk("RPC: %s: race, no rqst left for req %p\n",
		__func__, req);
	return;

out_shortreply:
	dprintk("RPC: %s: short/invalid reply\n", __func__);
	goto repost;

out_nomatch:
	spin_unlock(&buf->rb_lock);
	dprintk("RPC: %s: no match for incoming xid 0x%08x\n",
		__func__, be32_to_cpu(xid));
	goto repost;

out_duplicate:
	spin_unlock(&buf->rb_lock);
	dprintk("RPC: %s: "
		"duplicate reply %p to RPC request %p: xid 0x%08x\n",
		__func__, rep, req, be32_to_cpu(xid));

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		rpcrdma_recv_buffer_put(rep);
}