\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04002 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

static const char transfertypes[][12] = {
        "inline",       /* no chunks */
        "read list",    /* some argument via rdma read */
        "*read list",   /* entire request via rdma read */
        "write list",   /* some result via rdma write */
        "reply chunk"   /* entire reply via rdma write */
};

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
        unsigned int size;

        /* Fixed header fields and list discriminators */
        size = RPCRDMA_HDRLEN_MIN;

        /* Maximum Read list size */
        maxsegs += 2;   /* segment for head and tail buffers */
        size += maxsegs * sizeof(struct rpcrdma_read_chunk);

        /* Minimal Reply chunk size */
        size += sizeof(__be32); /* segment count */
        size += sizeof(struct rpcrdma_segment);
        size += sizeof(__be32); /* list discriminator */

        dprintk("RPC:       %s: max call header size = %u\n",
                __func__, size);
        return size;
}
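
/* For illustration, a worked example of the arithmetic above, under
 * hypothetical assumptions: maxsegs == 8, RPCRDMA_HDRLEN_MIN == 28,
 * sizeof(struct rpcrdma_read_chunk) == 24, and
 * sizeof(struct rpcrdma_segment) == 16:
 *
 *      size  = 28                      fixed fields + discriminators
 *      size += (8 + 2) * 24 = 240      full-size Read list
 *      size += 4 + 16 + 4   = 24       minimal Reply chunk
 *
 * for a total of 292 bytes, which rpcrdma_set_max_header_sizes()
 * below subtracts from the inline send threshold.
 */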

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
        unsigned int size;

        /* Fixed header fields and list discriminators */
        size = RPCRDMA_HDRLEN_MIN;

        /* Maximum Write list size */
        maxsegs += 2;   /* segment for head and tail buffers */
        size += sizeof(__be32);         /* segment count */
        size += maxsegs * sizeof(struct rpcrdma_segment);
        size += sizeof(__be32);         /* list discriminator */

        dprintk("RPC:       %s: max reply header size = %u\n",
                __func__, size);
        return size;
}

void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        unsigned int maxsegs = ia->ri_max_segs;

        ia->ri_max_inline_write = cdata->inline_wsize -
                                  rpcrdma_max_call_header_size(maxsegs);
        ia->ri_max_inline_read = cdata->inline_rsize -
                                 rpcrdma_max_reply_header_size(maxsegs);
}
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400124
Chuck Lever5457ced2015-08-03 13:03:49 -0400125/* The client can send a request inline as long as the RPCRDMA header
126 * plus the RPC call fit under the transport's inline limit. If the
127 * combined call message size exceeds that limit, the client must use
Chuck Lever16f906d2017-02-08 17:00:10 -0500128 * a Read chunk for this operation.
129 *
130 * A Read chunk is also required if sending the RPC call inline would
131 * exceed this device's max_sge limit.
Chuck Lever5457ced2015-08-03 13:03:49 -0400132 */
Chuck Lever302d3de2016-05-02 14:41:05 -0400133static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
134 struct rpc_rqst *rqst)
Chuck Lever5457ced2015-08-03 13:03:49 -0400135{
Chuck Lever16f906d2017-02-08 17:00:10 -0500136 struct xdr_buf *xdr = &rqst->rq_snd_buf;
137 unsigned int count, remaining, offset;
Chuck Lever5457ced2015-08-03 13:03:49 -0400138
Chuck Lever16f906d2017-02-08 17:00:10 -0500139 if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
140 return false;
141
142 if (xdr->page_len) {
143 remaining = xdr->page_len;
Chuck Leverd933cc32017-06-08 11:53:16 -0400144 offset = offset_in_page(xdr->page_base);
Chuck Lever16f906d2017-02-08 17:00:10 -0500145 count = 0;
146 while (remaining) {
147 remaining -= min_t(unsigned int,
148 PAGE_SIZE - offset, remaining);
149 offset = 0;
150 if (++count > r_xprt->rx_ia.ri_max_send_sges)
151 return false;
152 }
153 }
154
155 return true;
Chuck Lever5457ced2015-08-03 13:03:49 -0400156}
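
/* For illustration: with a (hypothetical) xdr->page_len of 8192 and
 * xdr->page_base of 3000, the loop above counts the SGEs that a Send
 * of the page list would require:
 *
 *      1st pass: 1096 bytes (remainder of the first page)
 *      2nd pass: 4096 bytes (a full page)
 *      3rd pass: 3000 bytes (the rest)
 *
 * Three page SGEs are needed. The request stays inline only if that
 * count fits under ri_max_send_sges, which the transport sizes to
 * leave room for the transport header and head/tail iovec SGEs.
 */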

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
                                   struct rpc_rqst *rqst)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
                     unsigned int *n)
{
        u32 remaining, page_offset;
        char *base;

        base = vec->iov_base;
        page_offset = offset_in_page(base);
        remaining = vec->iov_len;
        while (remaining) {
                seg->mr_page = NULL;
                seg->mr_offset = base;
                seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
                remaining -= seg->mr_len;
                base += seg->mr_len;
                ++seg;
                ++(*n);
                page_offset = 0;
        }
        return seg;
}
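
/* For illustration: a (hypothetical) kvec with iov_base at page
 * offset 100 and iov_len 9000 is split by the loop above into three
 * segments, assuming PAGE_SIZE == 4096:
 *
 *      seg[0]: page offset 100, length 3996
 *      seg[1]: page offset   0, length 4096
 *      seg[2]: page offset   0, length  908
 *
 * FMR must register each page separately; FRWR may coalesce all
 * three segments into a single MR.
 */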

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
                     unsigned int pos, enum rpcrdma_chunktype type,
                     struct rpcrdma_mr_seg *seg)
{
        unsigned long page_base;
        unsigned int len, n;
        struct page **ppages;

        n = 0;
        if (pos == 0)
                seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

        len = xdrbuf->page_len;
        ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
        page_base = offset_in_page(xdrbuf->page_base);
        while (len) {
                if (unlikely(!*ppages)) {
                        /* XXX: Certain upper layer operations do
                         * not provide receive buffer pages.
                         */
                        *ppages = alloc_page(GFP_ATOMIC);
                        if (!*ppages)
                                return -EAGAIN;
                }
                seg->mr_page = *ppages;
                seg->mr_offset = (char *)page_base;
                seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
                len -= seg->mr_len;
                ++ppages;
                ++seg;
                ++n;
                page_base = 0;
        }

        /* When encoding a Read chunk, the tail iovec contains an
         * XDR pad and may be omitted.
         */
        if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
                goto out;

        /* When encoding a Write chunk, some servers need to see an
         * extra segment for non-XDR-aligned Write chunks. The upper
         * layer provides space in the tail iovec that may be used
         * for this purpose.
         */
        if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
                goto out;

        if (xdrbuf->tail[0].iov_len)
                seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
        if (unlikely(n > RPCRDMA_MAX_SEGS))
                return -EIO;
        return n;
}

static inline int
encode_item_present(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p = xdr_one;
        return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p = xdr_zero;
        return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
        *iptr++ = cpu_to_be32(mw->mw_handle);
        *iptr++ = cpu_to_be32(mw->mw_length);
        xdr_encode_hyper(iptr, mw->mw_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, 4 * sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        xdr_encode_rdma_segment(p, mw);
        return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
                    u32 position)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, 6 * sizeof(*p));
        if (unlikely(!p))
                return -EMSGSIZE;

        *p++ = xdr_one;                 /* Item present */
        *p++ = cpu_to_be32(position);
        xdr_encode_rdma_segment(p, mw);
        return 0;
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                         struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mw *mw;
        unsigned int pos;
        int n, nsegs;

        pos = rqst->rq_snd_buf.head[0].iov_len;
        if (rtype == rpcrdma_areadch)
                pos = 0;
        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
                                     rtype, seg);
        if (nsegs < 0)
                return nsegs;

        do {
                n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
                                                 false, &mw);
                if (n < 0)
                        return n;
                rpcrdma_push_mw(mw, &req->rl_registered);

                if (encode_read_segment(xdr, mw, pos) < 0)
                        return -EMSGSIZE;

                dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
                        rqst->rq_task->tk_pid, __func__, pos,
                        mw->mw_length, (unsigned long long)mw->mw_offset,
                        mw->mw_handle, n < nsegs ? "more" : "last");

                r_xprt->rx_stats.read_chunk_count++;
                seg += n;
                nsegs -= n;
        } while (nsegs);

        return 0;
}
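
/* For illustration, a Read list carrying one two-segment chunk at
 * XDR position 20 goes on the wire as (all values hypothetical):
 *
 *      1, 20, H1, L1, O1       first read segment
 *      1, 20, H2, L2, O2       second segment, same position
 *
 * The terminating 0 for the list is emitted by the caller via
 * encode_item_not_present() in rpcrdma_marshal_req() below.
 */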

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                          struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mw *mw;
        int n, nsegs, nchunks;
        __be32 *segcount;

        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
                                     rqst->rq_rcv_buf.head[0].iov_len,
                                     wtype, seg);
        if (nsegs < 0)
                return nsegs;

        if (encode_item_present(xdr) < 0)
                return -EMSGSIZE;
        segcount = xdr_reserve_space(xdr, sizeof(*segcount));
        if (unlikely(!segcount))
                return -EMSGSIZE;
        /* Actual value encoded below */

        nchunks = 0;
        do {
                n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
                                                 true, &mw);
                if (n < 0)
                        return n;
                rpcrdma_push_mw(mw, &req->rl_registered);

                if (encode_rdma_segment(xdr, mw) < 0)
                        return -EMSGSIZE;

                dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
                        rqst->rq_task->tk_pid, __func__,
                        mw->mw_length, (unsigned long long)mw->mw_offset,
                        mw->mw_handle, n < nsegs ? "more" : "last");

                r_xprt->rx_stats.write_chunk_count++;
                r_xprt->rx_stats.total_rdma_request += seg->mr_len;
                nchunks++;
                seg += n;
                nsegs -= n;
        } while (nsegs);

        /* Update count of segments in this Write chunk */
        *segcount = cpu_to_be32(nchunks);

        return 0;
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                           struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
        struct xdr_stream *xdr = &req->rl_stream;
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mw *mw;
        int n, nsegs, nchunks;
        __be32 *segcount;

        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
        if (nsegs < 0)
                return nsegs;

        if (encode_item_present(xdr) < 0)
                return -EMSGSIZE;
        segcount = xdr_reserve_space(xdr, sizeof(*segcount));
        if (unlikely(!segcount))
                return -EMSGSIZE;
        /* Actual value encoded below */

        nchunks = 0;
        do {
                n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
                                                 true, &mw);
                if (n < 0)
                        return n;
                rpcrdma_push_mw(mw, &req->rl_registered);

                if (encode_rdma_segment(xdr, mw) < 0)
                        return -EMSGSIZE;

                dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
                        rqst->rq_task->tk_pid, __func__,
                        mw->mw_length, (unsigned long long)mw->mw_offset,
                        mw->mw_handle, n < nsegs ? "more" : "last");

                r_xprt->rx_stats.reply_chunk_count++;
                r_xprt->rx_stats.total_rdma_request += seg->mr_len;
                nchunks++;
                seg += n;
                nsegs -= n;
        } while (nsegs);

        /* Update count of segments in the Reply chunk */
        *segcount = cpu_to_be32(nchunks);

        return 0;
}

/* Prepare the RPC-over-RDMA header SGE.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
                        u32 len)
{
        struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
        struct ib_sge *sge = &req->rl_send_sge[0];

        if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
                if (!__rpcrdma_dma_map_regbuf(ia, rb))
                        return false;
                sge->addr = rdmab_addr(rb);
                sge->lkey = rdmab_lkey(rb);
        }
        sge->length = len;

        ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
                                      sge->length, DMA_TO_DEVICE);
        req->rl_send_wr.num_sge++;
        return true;
}
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400539
Chuck Lever655fec62016-09-15 10:57:24 -0400540/* Prepare the Send SGEs. The head and tail iovec, and each entry
541 * in the page list, gets its own SGE.
542 */
543static bool
544rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
545 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
546{
547 unsigned int sge_no, page_base, len, remaining;
548 struct rpcrdma_regbuf *rb = req->rl_sendbuf;
549 struct ib_device *device = ia->ri_device;
550 struct ib_sge *sge = req->rl_send_sge;
551 u32 lkey = ia->ri_pd->local_dma_lkey;
552 struct page *page, **ppages;
Tom Talpeyb38ab402009-03-11 14:37:55 -0400553
Chuck Lever655fec62016-09-15 10:57:24 -0400554 /* The head iovec is straightforward, as it is already
555 * DMA-mapped. Sync the content that has changed.
556 */
557 if (!rpcrdma_dma_map_regbuf(ia, rb))
558 return false;
559 sge_no = 1;
560 sge[sge_no].addr = rdmab_addr(rb);
561 sge[sge_no].length = xdr->head[0].iov_len;
562 sge[sge_no].lkey = rdmab_lkey(rb);
Chuck Lever91a10c52017-04-11 13:23:02 -0400563 ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
Chuck Lever655fec62016-09-15 10:57:24 -0400564 sge[sge_no].length, DMA_TO_DEVICE);
565
566 /* If there is a Read chunk, the page list is being handled
567 * via explicit RDMA, and thus is skipped here. However, the
568 * tail iovec may include an XDR pad for the page list, as
569 * well as additional content, and may not reside in the
570 * same page as the head iovec.
571 */
572 if (rtype == rpcrdma_readch) {
573 len = xdr->tail[0].iov_len;
574
575 /* Do not include the tail if it is only an XDR pad */
576 if (len < 4)
577 goto out;
578
579 page = virt_to_page(xdr->tail[0].iov_base);
Chuck Leverd933cc32017-06-08 11:53:16 -0400580 page_base = offset_in_page(xdr->tail[0].iov_base);
Chuck Lever655fec62016-09-15 10:57:24 -0400581
582 /* If the content in the page list is an odd length,
583 * xdr_write_pages() has added a pad at the beginning
584 * of the tail iovec. Force the tail's non-pad content
585 * to land at the next XDR position in the Send message.
586 */
587 page_base += len & 3;
588 len -= len & 3;
589 goto map_tail;
590 }
591
592 /* If there is a page list present, temporarily DMA map
593 * and prepare an SGE for each page to be sent.
594 */
595 if (xdr->page_len) {
596 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
Chuck Leverd933cc32017-06-08 11:53:16 -0400597 page_base = offset_in_page(xdr->page_base);
Chuck Lever655fec62016-09-15 10:57:24 -0400598 remaining = xdr->page_len;
599 while (remaining) {
600 sge_no++;
601 if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
602 goto out_mapping_overflow;
603
604 len = min_t(u32, PAGE_SIZE - page_base, remaining);
605 sge[sge_no].addr = ib_dma_map_page(device, *ppages,
606 page_base, len,
607 DMA_TO_DEVICE);
608 if (ib_dma_mapping_error(device, sge[sge_no].addr))
609 goto out_mapping_err;
610 sge[sge_no].length = len;
611 sge[sge_no].lkey = lkey;
612
613 req->rl_mapped_sges++;
614 ppages++;
615 remaining -= len;
616 page_base = 0;
Tom Talpeyb38ab402009-03-11 14:37:55 -0400617 }
Tom Talpeyb38ab402009-03-11 14:37:55 -0400618 }
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000619
Chuck Lever655fec62016-09-15 10:57:24 -0400620 /* The tail iovec is not always constructed in the same
621 * page where the head iovec resides (see, for example,
622 * gss_wrap_req_priv). To neatly accommodate that case,
623 * DMA map it separately.
624 */
625 if (xdr->tail[0].iov_len) {
626 page = virt_to_page(xdr->tail[0].iov_base);
Chuck Leverd933cc32017-06-08 11:53:16 -0400627 page_base = offset_in_page(xdr->tail[0].iov_base);
Chuck Lever655fec62016-09-15 10:57:24 -0400628 len = xdr->tail[0].iov_len;
629
630map_tail:
631 sge_no++;
632 sge[sge_no].addr = ib_dma_map_page(device, page,
633 page_base, len,
634 DMA_TO_DEVICE);
635 if (ib_dma_mapping_error(device, sge[sge_no].addr))
636 goto out_mapping_err;
637 sge[sge_no].length = len;
638 sge[sge_no].lkey = lkey;
639 req->rl_mapped_sges++;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400640 }
Chuck Lever655fec62016-09-15 10:57:24 -0400641
642out:
643 req->rl_send_wr.num_sge = sge_no + 1;
644 return true;
645
646out_mapping_overflow:
647 pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
648 return false;
649
650out_mapping_err:
651 pr_err("rpcrdma: Send mapping error\n");
652 return false;
653}

bool
rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
                          u32 hdrlen, struct xdr_buf *xdr,
                          enum rpcrdma_chunktype rtype)
{
        req->rl_send_wr.num_sge = 0;
        req->rl_mapped_sges = 0;

        if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
                goto out_map;

        if (rtype != rpcrdma_areadch)
                if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
                        goto out_map;

        return true;

out_map:
        pr_err("rpcrdma: failed to DMA map a Send buffer\n");
        return false;
}

void
rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
        struct ib_device *device = ia->ri_device;
        struct ib_sge *sge;
        int count;

        sge = &req->rl_send_sge[2];
        for (count = req->rl_mapped_sges; count--; sge++)
                ib_dma_unmap_page(device, sge->addr, sge->length,
                                  DMA_TO_DEVICE);
        req->rl_mapped_sges = 0;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if not enough pages are available for on-demand reply buffer,
 *	%-ENOBUFS if no MRs are available to register chunks,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct xdr_stream *xdr = &req->rl_stream;
        enum rpcrdma_chunktype rtype, wtype;
        bool ddp_allowed;
        __be32 *p;
        int ret;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
        if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
                return rpcrdma_bc_marshal_reply(rqst);
#endif

        rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
        xdr_init_encode(xdr, &req->rl_hdrbuf,
                        req->rl_rdmabuf->rg_base);

        /* Fixed header fields */
        ret = -EMSGSIZE;
        p = xdr_reserve_space(xdr, 4 * sizeof(*p));
        if (!p)
                goto out_err;
        *p++ = rqst->rq_xid;
        *p++ = rpcrdma_version;
        *p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

        /* When the ULP employs a GSS flavor that guarantees integrity
         * or privacy, direct data placement of individual data items
         * is not allowed.
         */
        ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
                                                RPCAUTH_AUTH_DATATOUCH);

        /*
         * Chunks needed for results?
         *
         * o If the expected result is under the inline threshold, all ops
         *   return as inline.
         * o Large read ops return data as write chunk(s), header as
         *   inline.
         * o Large non-read ops return as a single reply chunk.
         */
        if (rpcrdma_results_inline(r_xprt, rqst))
                wtype = rpcrdma_noch;
        else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
                wtype = rpcrdma_writech;
        else
                wtype = rpcrdma_replych;

        /*
         * Chunks needed for arguments?
         *
         * o If the total request is under the inline threshold, all ops
         *   are sent as inline.
         * o Large write ops transmit data as read chunk(s), header as
         *   inline.
         * o Large non-write ops are sent with the entire message as a
         *   single read chunk (protocol 0-position special case).
         *
         * This assumes that the upper layer does not present a request
         * that both has a data payload, and whose non-data arguments
         * by themselves are larger than the inline threshold.
         */
        if (rpcrdma_args_inline(r_xprt, rqst)) {
                *p++ = rdma_msg;
                rtype = rpcrdma_noch;
        } else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
                *p++ = rdma_msg;
                rtype = rpcrdma_readch;
        } else {
                r_xprt->rx_stats.nomsg_call_count++;
                *p++ = rdma_nomsg;
                rtype = rpcrdma_areadch;
        }

        req->rl_xid = rqst->rq_xid;
        rpcrdma_insert_req(&r_xprt->rx_buf, req);

        /* This implementation supports the following combinations
         * of chunk lists in one RPC-over-RDMA Call message:
         *
         *   - Read list
         *   - Write list
         *   - Reply chunk
         *   - Read list + Reply chunk
         *
         * It might not yet support the following combinations:
         *
         *   - Read list + Write list
         *
         * It does not support the following combinations:
         *
         *   - Write list + Reply chunk
         *   - Read list + Write list + Reply chunk
         *
         * This implementation supports only a single chunk in each
         * Read or Write list. Thus for example the client cannot
         * send a Call message with a Position Zero Read chunk and a
         * regular Read chunk at the same time.
         */
        if (rtype != rpcrdma_noch) {
                ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
                if (ret)
                        goto out_err;
        }
        ret = encode_item_not_present(xdr);
        if (ret)
                goto out_err;

        if (wtype == rpcrdma_writech) {
                ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
                if (ret)
                        goto out_err;
        }
        ret = encode_item_not_present(xdr);
        if (ret)
                goto out_err;

        if (wtype != rpcrdma_replych)
                ret = encode_item_not_present(xdr);
        else
                ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
        if (ret)
                goto out_err;

        dprintk("RPC: %5u %s: %s/%s: hdrlen %u\n",
                rqst->rq_task->tk_pid, __func__,
                transfertypes[rtype], transfertypes[wtype],
                xdr_stream_pos(xdr));

        if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req,
                                       xdr_stream_pos(xdr),
                                       &rqst->rq_snd_buf, rtype)) {
                ret = -EIO;
                goto out_err;
        }
        return 0;

out_err:
        if (ret != -ENOBUFS) {
                pr_err("rpcrdma: header marshaling failed (%d)\n", ret);
                r_xprt->rx_stats.failed_marshal_count++;
        }
        return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
        unsigned long fixup_copy_count;
        int i, npages, curlen;
        char *destp;
        struct page **ppages;
        int page_base;

        /* The head iovec is redirected to the RPC reply message
         * in the receive buffer, to avoid a memcopy.
         */
        rqst->rq_rcv_buf.head[0].iov_base = srcp;
        rqst->rq_private_buf.head[0].iov_base = srcp;

        /* The contents of the receive buffer that follow
         * head.iov_len bytes are copied into the page list.
         */
        curlen = rqst->rq_rcv_buf.head[0].iov_len;
        if (curlen > copy_len)
                curlen = copy_len;
        dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
                __func__, srcp, copy_len, curlen);
        srcp += curlen;
        copy_len -= curlen;

        ppages = rqst->rq_rcv_buf.pages +
                (rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
        page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
        fixup_copy_count = 0;
        if (copy_len && rqst->rq_rcv_buf.page_len) {
                int pagelist_len;

                pagelist_len = rqst->rq_rcv_buf.page_len;
                if (pagelist_len > copy_len)
                        pagelist_len = copy_len;
                npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
                for (i = 0; i < npages; i++) {
                        curlen = PAGE_SIZE - page_base;
                        if (curlen > pagelist_len)
                                curlen = pagelist_len;

                        dprintk("RPC:       %s: page %d"
                                " srcp 0x%p len %d curlen %d\n",
                                __func__, i, srcp, copy_len, curlen);
                        destp = kmap_atomic(ppages[i]);
                        memcpy(destp + page_base, srcp, curlen);
                        flush_dcache_page(ppages[i]);
                        kunmap_atomic(destp);
                        srcp += curlen;
                        copy_len -= curlen;
                        fixup_copy_count += curlen;
                        pagelist_len -= curlen;
                        if (!pagelist_len)
                                break;
                        page_base = 0;
                }

                /* Implicit padding for the last segment in a Write
                 * chunk is inserted inline at the front of the tail
                 * iovec. The upper layer ignores the content of
                 * the pad. Simply ensure inline content in the tail
                 * that follows the Write chunk is properly aligned.
                 */
                if (pad)
                        srcp -= pad;
        }

        /* The tail iovec is redirected to the remaining data
         * in the receive buffer, to avoid a memcopy.
         */
        if (copy_len || pad) {
                rqst->rq_rcv_buf.tail[0].iov_base = srcp;
                rqst->rq_private_buf.tail[0].iov_base = srcp;
        }

        return fixup_copy_count;
}

/* Caller must guarantee @rep remains stable during this call.
 */
static void
rpcrdma_mark_remote_invalidation(struct list_head *mws,
                                 struct rpcrdma_rep *rep)
{
        struct rpcrdma_mw *mw;

        if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
                return;

        list_for_each_entry(mw, mws, mw_list)
                if (mw->mw_handle == rep->rr_inv_rkey) {
                        mw->mw_flags = RPCRDMA_MW_F_RI;
                        break; /* only one invalidated MR per RPC */
                }
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                 __be32 xid, __be32 proc)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        __be32 *p;

        if (proc != rdma_msg)
                return false;

        /* Peek at stream contents without advancing. */
        p = xdr_inline_decode(xdr, 0);

        /* Chunk lists */
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;

        /* RPC header */
        if (*p++ != xid)
                return false;
        if (*p != cpu_to_be32(RPC_CALL))
                return false;

        /* Now that we are sure this is a backchannel call,
         * advance to the RPC header.
         */
        p = xdr_inline_decode(xdr, 3 * sizeof(*p));
        if (unlikely(!p))
                goto out_short;

        rpcrdma_bc_receive_call(r_xprt, rep);
        return true;

out_short:
        pr_warn("RPC/RDMA short backward direction call\n");
        if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
                xprt_disconnect_done(&r_xprt->rx_xprt);
        return true;
}
#else   /* CONFIG_SUNRPC_BACKCHANNEL */
{
        return false;
}
#endif  /* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
        __be32 *p;

        p = xdr_inline_decode(xdr, 4 * sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        ifdebug(FACILITY) {
                u64 offset;
                u32 handle;

                handle = be32_to_cpup(p++);
                *length = be32_to_cpup(p++);
                xdr_decode_hyper(p, &offset);
                dprintk("RPC:       %s: segment %u@0x%016llx:0x%08x\n",
                        __func__, *length, (unsigned long long)offset,
                        handle);
        } else {
                *length = be32_to_cpup(p + 1);
        }

        return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
        u32 segcount, seglength;
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        *length = 0;
        segcount = be32_to_cpup(p);
        while (segcount--) {
                if (decode_rdma_segment(xdr, &seglength))
                        return -EIO;
                *length += seglength;
        }

        dprintk("RPC:       %s: segcount=%u, %u bytes\n",
                __func__, be32_to_cpup(p), *length);
        return 0;
}
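
/* For illustration, decoding a (hypothetical) two-segment Write
 * chunk off the wire:
 *
 *      2, (H1, 4096, O1), (H2, 1500, O2)
 *
 * consumes one segment-count word plus two HLOO segments from the
 * stream and returns *length == 5596, the number of reply bytes
 * the responder claims to have written via RDMA.
 */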

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;
        if (unlikely(*p != xdr_zero))
                return -EIO;
        return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
        u32 chunklen;
        bool first;
        __be32 *p;

        *length = 0;
        first = true;
        do {
                p = xdr_inline_decode(xdr, sizeof(*p));
                if (unlikely(!p))
                        return -EIO;
                if (*p == xdr_zero)
                        break;
                if (!first)
                        return -EIO;

                if (decode_write_chunk(xdr, &chunklen))
                        return -EIO;
                *length += chunklen;
                first = false;
        } while (true);
        return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        *length = 0;
        if (*p != xdr_zero)
                if (decode_write_chunk(xdr, length))
                        return -EIO;
        return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                   struct rpc_rqst *rqst)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        u32 writelist, replychunk, rpclen;
        char *base;

        /* Decode the chunk lists */
        if (decode_read_list(xdr))
                return -EIO;
        if (decode_write_list(xdr, &writelist))
                return -EIO;
        if (decode_reply_chunk(xdr, &replychunk))
                return -EIO;

        /* RDMA_MSG sanity checks */
        if (unlikely(replychunk))
                return -EIO;

        /* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
        base = (char *)xdr_inline_decode(xdr, 0);
        rpclen = xdr_stream_remaining(xdr);
        r_xprt->rx_stats.fixup_copy_count +=
                rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

        r_xprt->rx_stats.total_rdma_reply += writelist;
        return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        u32 writelist, replychunk;

        /* Decode the chunk lists */
        if (decode_read_list(xdr))
                return -EIO;
        if (decode_write_list(xdr, &writelist))
                return -EIO;
        if (decode_reply_chunk(xdr, &replychunk))
                return -EIO;

        /* RDMA_NOMSG sanity checks */
        if (unlikely(writelist))
                return -EIO;
        if (unlikely(!replychunk))
                return -EIO;

        /* Reply chunk buffer already is the reply vector */
        r_xprt->rx_stats.total_rdma_reply += replychunk;
        return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                     struct rpc_rqst *rqst)
{
        struct xdr_stream *xdr = &rep->rr_stream;
        __be32 *p;

        p = xdr_inline_decode(xdr, sizeof(*p));
        if (unlikely(!p))
                return -EIO;

        switch (*p) {
        case err_vers:
                p = xdr_inline_decode(xdr, 2 * sizeof(*p));
                if (!p)
                        break;
                dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
                        rqst->rq_task->tk_pid, __func__,
                        be32_to_cpup(p), be32_to_cpu(*(p + 1)));
                break;
        case err_chunk:
                dprintk("RPC: %5u: %s: server reports header decoding error\n",
                        rqst->rq_task->tk_pid, __func__);
                break;
        default:
                dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
                        rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
        }

        r_xprt->rx_stats.bad_reply_count++;
        return -EREMOTEIO;
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct work_struct *work)
{
        struct rpcrdma_rep *rep =
                        container_of(work, struct rpcrdma_rep, rr_work);
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct xdr_stream *xdr = &rep->rr_stream;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        __be32 *p, xid, vers, proc;
        unsigned long cwnd;
        struct list_head mws;
        int status;

        dprintk("RPC:       %s: incoming rep %p\n", __func__, rep);

        if (rep->rr_hdrbuf.head[0].iov_len == 0)
                goto out_badstatus;

        xdr_init_decode(xdr, &rep->rr_hdrbuf,
                        rep->rr_hdrbuf.head[0].iov_base);

        /* Fixed transport header fields */
        p = xdr_inline_decode(xdr, 4 * sizeof(*p));
        if (unlikely(!p))
                goto out_shortreply;
        xid = *p++;
        vers = *p++;
        p++;    /* credits */
        proc = *p++;

        if (rpcrdma_is_bcall(r_xprt, rep, xid, proc))
                return;

        /* Match incoming rpcrdma_rep to an rpcrdma_req to
         * get context for handling any incoming chunks.
         */
        spin_lock(&buf->rb_lock);
        req = rpcrdma_lookup_req_locked(&r_xprt->rx_buf, xid);
        if (!req)
                goto out_nomatch;
        if (req->rl_reply)
                goto out_duplicate;

        list_replace_init(&req->rl_registered, &mws);
        rpcrdma_mark_remote_invalidation(&mws, rep);

        /* Avoid races with signals and duplicate replies
         * by marking this req as matched.
         */
        req->rl_reply = rep;
        spin_unlock(&buf->rb_lock);

        dprintk("RPC:       %s: reply %p completes request %p (xid 0x%08x)\n",
                __func__, rep, req, be32_to_cpu(xid));

        /* Invalidate and unmap the data payloads before waking the
         * waiting application. This guarantees the memory regions
         * are properly fenced from the server before the application
         * accesses the data. It also ensures proper send flow control:
         * waking the next RPC waits until this RPC has relinquished
         * all its Send Queue entries.
         */
        if (!list_empty(&mws))
                r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &mws);

        /* Perform XID lookup, reconstruction of the RPC reply, and
         * RPC completion while holding the transport lock to ensure
         * the rep, rqst, and rq_task pointers remain stable.
         */
        spin_lock_bh(&xprt->transport_lock);
        rqst = xprt_lookup_rqst(xprt, xid);
        if (!rqst)
                goto out_norqst;
        xprt->reestablish_timeout = 0;
        if (vers != rpcrdma_version)
                goto out_badversion;

        switch (proc) {
        case rdma_msg:
                status = rpcrdma_decode_msg(r_xprt, rep, rqst);
                break;
        case rdma_nomsg:
                status = rpcrdma_decode_nomsg(r_xprt, rep);
                break;
        case rdma_error:
                status = rpcrdma_decode_error(r_xprt, rep, rqst);
                break;
        default:
                status = -EIO;
        }
        if (status < 0)
                goto out_badheader;

out:
        cwnd = xprt->cwnd;
        xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
        if (xprt->cwnd > cwnd)
                xprt_release_rqst_cong(rqst->rq_task);

        xprt_complete_rqst(rqst->rq_task, status);
        spin_unlock_bh(&xprt->transport_lock);
        dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
                __func__, xprt, rqst, status);
        return;

out_badstatus:
        rpcrdma_recv_buffer_put(rep);
        if (r_xprt->rx_ep.rep_connected == 1) {
                r_xprt->rx_ep.rep_connected = -EIO;
                rpcrdma_conn_func(&r_xprt->rx_ep);
        }
        return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badversion:
        dprintk("RPC:       %s: invalid version %d\n",
                __func__, be32_to_cpu(vers));
        status = -EIO;
        r_xprt->rx_stats.bad_reply_count++;
        goto out;

out_badheader:
        dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
                rqst->rq_task->tk_pid, __func__, be32_to_cpu(proc));
        r_xprt->rx_stats.bad_reply_count++;
        status = -EIO;
        goto out;

/* The req was still available, but by the time the transport_lock
 * was acquired, the rqst and task had been released. Thus the RPC
 * has already been terminated.
 */
out_norqst:
        spin_unlock_bh(&xprt->transport_lock);
        rpcrdma_buffer_put(req);
        dprintk("RPC:       %s: race, no rqst left for req %p\n",
                __func__, req);
        return;

out_shortreply:
        dprintk("RPC:       %s: short/invalid reply\n", __func__);
        goto repost;

out_nomatch:
        spin_unlock(&buf->rb_lock);
        dprintk("RPC:       %s: no match for incoming xid 0x%08x\n",
                __func__, be32_to_cpu(xid));
        goto repost;

out_duplicate:
        spin_unlock(&buf->rb_lock);
        dprintk("RPC:       %s: "
                "duplicate reply %p to RPC request %p: xid 0x%08x\n",
                __func__, rep, req, be32_to_cpu(xid));

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
repost:
        r_xprt->rx_stats.bad_reply_count++;
        if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
                rpcrdma_recv_buffer_put(rep);
}
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001394}