\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04002 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static const char transfertypes[][12] = {
	"inline",	/* no chunks */
	"read list",	/* some argument via rdma read */
	"*read list",	/* entire request via rdma read */
	"write list",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size = maxsegs * sizeof(struct rpcrdma_read_chunk);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size = sizeof(__be32);		/* segment count */
	size += maxsegs * sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				  rpcrdma_max_reply_header_size(maxsegs);
}
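
/* Illustrative sketch of how the two limits computed above are used
 * (the 4096-byte figure is an assumed mount-time inline threshold,
 * not a value defined in this file): with inline_wsize = 4096,
 * ri_max_inline_write is 4096 minus the worst-case Call header size,
 * and ri_max_inline_read is inline_rsize minus the worst-case Reply
 * header size.  rpcrdma_args_inline() and rpcrdma_results_inline()
 * below compare each RPC's send and receive buffer sizes against
 * these limits to decide whether chunks must be registered.
 */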
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400124
Chuck Lever5457ced2015-08-03 13:03:49 -0400125/* The client can send a request inline as long as the RPCRDMA header
126 * plus the RPC call fit under the transport's inline limit. If the
127 * combined call message size exceeds that limit, the client must use
Chuck Lever16f906d2017-02-08 17:00:10 -0500128 * a Read chunk for this operation.
129 *
130 * A Read chunk is also required if sending the RPC call inline would
131 * exceed this device's max_sge limit.
Chuck Lever5457ced2015-08-03 13:03:49 -0400132 */
Chuck Lever302d3de2016-05-02 14:41:05 -0400133static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
134 struct rpc_rqst *rqst)
Chuck Lever5457ced2015-08-03 13:03:49 -0400135{
Chuck Lever16f906d2017-02-08 17:00:10 -0500136 struct xdr_buf *xdr = &rqst->rq_snd_buf;
137 unsigned int count, remaining, offset;
Chuck Lever5457ced2015-08-03 13:03:49 -0400138
Chuck Lever16f906d2017-02-08 17:00:10 -0500139 if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
140 return false;
141
142 if (xdr->page_len) {
143 remaining = xdr->page_len;
Chuck Leverd933cc32017-06-08 11:53:16 -0400144 offset = offset_in_page(xdr->page_base);
Chuck Lever16f906d2017-02-08 17:00:10 -0500145 count = 0;
146 while (remaining) {
147 remaining -= min_t(unsigned int,
148 PAGE_SIZE - offset, remaining);
149 offset = 0;
150 if (++count > r_xprt->rx_ia.ri_max_send_sges)
151 return false;
152 }
153 }
154
155 return true;
Chuck Lever5457ced2015-08-03 13:03:49 -0400156}
157
158/* The client can't know how large the actual reply will be. Thus it
159 * plans for the largest possible reply for that particular ULP
160 * operation. If the maximum combined reply message size exceeds that
161 * limit, the client must provide a write list or a reply chunk for
162 * this request.
163 */
Chuck Lever302d3de2016-05-02 14:41:05 -0400164static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
165 struct rpc_rqst *rqst)
Chuck Lever5457ced2015-08-03 13:03:49 -0400166{
Chuck Lever302d3de2016-05-02 14:41:05 -0400167 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
Chuck Lever5457ced2015-08-03 13:03:49 -0400168
Chuck Lever302d3de2016-05-02 14:41:05 -0400169 return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
Chuck Lever5457ced2015-08-03 13:03:49 -0400170}
171
Chuck Lever28d9d562017-08-14 15:38:22 -0400172/* Split @vec on page boundaries into SGEs. FMR registers pages, not
173 * a byte range. Other modes coalesce these SGEs into a single MR
174 * when they can.
175 *
176 * Returns pointer to next available SGE, and bumps the total number
177 * of SGEs consumed.
Chuck Lever821c7912016-03-04 11:27:52 -0500178 */
Chuck Lever28d9d562017-08-14 15:38:22 -0400179static struct rpcrdma_mr_seg *
180rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
181 unsigned int *n)
Chuck Lever821c7912016-03-04 11:27:52 -0500182{
Chuck Lever28d9d562017-08-14 15:38:22 -0400183 u32 remaining, page_offset;
Chuck Lever821c7912016-03-04 11:27:52 -0500184 char *base;
185
186 base = vec->iov_base;
187 page_offset = offset_in_page(base);
188 remaining = vec->iov_len;
Chuck Lever28d9d562017-08-14 15:38:22 -0400189 while (remaining) {
190 seg->mr_page = NULL;
191 seg->mr_offset = base;
192 seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
193 remaining -= seg->mr_len;
194 base += seg->mr_len;
195 ++seg;
196 ++(*n);
Chuck Lever821c7912016-03-04 11:27:52 -0500197 page_offset = 0;
198 }
Chuck Lever28d9d562017-08-14 15:38:22 -0400199 return seg;
Chuck Lever821c7912016-03-04 11:27:52 -0500200}

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		if (unlikely(!*ppages)) {
			/* XXX: Certain upper layer operations do
			 * not provide receive buffer pages.
			 */
			*ppages = alloc_page(GFP_ATOMIC);
			if (!*ppages)
				return -EAGAIN;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
	*iptr++ = cpu_to_be32(mw->mw_handle);
	*iptr++ = cpu_to_be32(mw->mw_length);
	xdr_encode_hyper(iptr, mw->mw_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mw);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mw);
	return 0;
}
/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   false, &mw);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_read_segment(xdr, mw, pos) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__, pos,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mw->mw_nents;
	} while (nsegs);

	return 0;
}
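
/* Illustrative example of the encoding performed above (handle,
 * length, and offset values are invented, not produced by this file):
 * a Read chunk of two segments, both at XDR position 36, is emitted
 * as
 *
 *    1, 36, H1, L1, O1,  1, 36, H2, L2, O2
 *
 * one "item present" discriminator plus the shared position per
 * segment.  The terminating zero for the Read list is added later by
 * encode_item_not_present() in rpcrdma_marshal_req().
 */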
384
Chuck Lever39f4cd92017-08-10 12:47:36 -0400385/* Register and XDR encode the Write list. Supports encoding a list
386 * containing one array of plain segments that belong to a single
387 * write chunk.
Chuck Lever94f58c52016-05-02 14:41:30 -0400388 *
389 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
390 *
391 * Write chunklist (a list of (one) counted array):
392 * N elements:
393 * 1 - N - HLOO - HLOO - ... - HLOO - 0
394 *
Chuck Lever39f4cd92017-08-10 12:47:36 -0400395 * Returns zero on success, or a negative errno if a failure occurred.
396 * @xdr is advanced to the next position in the stream.
397 *
398 * Only a single Write chunk is currently supported.
Chuck Lever94f58c52016-05-02 14:41:30 -0400399 */
Chuck Lever39f4cd92017-08-10 12:47:36 -0400400static noinline int
Chuck Lever94f58c52016-05-02 14:41:30 -0400401rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
Chuck Lever39f4cd92017-08-10 12:47:36 -0400402 struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
Chuck Lever94f58c52016-05-02 14:41:30 -0400403{
Chuck Lever39f4cd92017-08-10 12:47:36 -0400404 struct xdr_stream *xdr = &req->rl_stream;
Chuck Lever5ab81422016-06-29 13:54:25 -0400405 struct rpcrdma_mr_seg *seg;
Chuck Lever9d6b0402016-06-29 13:54:16 -0400406 struct rpcrdma_mw *mw;
Chuck Lever6748b0c2017-08-14 15:38:30 -0400407 int nsegs, nchunks;
Chuck Lever94f58c52016-05-02 14:41:30 -0400408 __be32 *segcount;
409
Chuck Lever5ab81422016-06-29 13:54:25 -0400410 seg = req->rl_segments;
Chuck Leverb5f0afb2017-02-08 16:59:54 -0500411 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
Chuck Lever94f58c52016-05-02 14:41:30 -0400412 rqst->rq_rcv_buf.head[0].iov_len,
Chuck Leverb5f0afb2017-02-08 16:59:54 -0500413 wtype, seg);
Chuck Lever94f58c52016-05-02 14:41:30 -0400414 if (nsegs < 0)
Chuck Lever39f4cd92017-08-10 12:47:36 -0400415 return nsegs;
Chuck Lever94f58c52016-05-02 14:41:30 -0400416
Chuck Lever39f4cd92017-08-10 12:47:36 -0400417 if (encode_item_present(xdr) < 0)
418 return -EMSGSIZE;
419 segcount = xdr_reserve_space(xdr, sizeof(*segcount));
420 if (unlikely(!segcount))
421 return -EMSGSIZE;
422 /* Actual value encoded below */
Chuck Lever94f58c52016-05-02 14:41:30 -0400423
424 nchunks = 0;
425 do {
Chuck Lever6748b0c2017-08-14 15:38:30 -0400426 seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
427 true, &mw);
428 if (IS_ERR(seg))
429 return PTR_ERR(seg);
Chuck Lever9a5c63e2017-02-08 17:00:43 -0500430 rpcrdma_push_mw(mw, &req->rl_registered);
Chuck Lever94f58c52016-05-02 14:41:30 -0400431
Chuck Lever39f4cd92017-08-10 12:47:36 -0400432 if (encode_rdma_segment(xdr, mw) < 0)
433 return -EMSGSIZE;
Chuck Lever94f58c52016-05-02 14:41:30 -0400434
		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		nsegs -= mw->mw_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mw);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_rdma_segment(xdr, mw) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		nsegs -= mw->mw_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/**
 * rpcrdma_unmap_sges - DMA-unmap Send buffers
 * @ia: interface adapter (device)
 * @req: req with possibly some SGEs to be DMA unmapped
 *
 */
void
rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_sge *sge;
	unsigned int count;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	sge = &req->rl_send_sge[2];
	for (count = req->rl_mapped_sges; count--; sge++)
		ib_dma_unmap_page(ia->ri_device,
				  sge->addr, sge->length, DMA_TO_DEVICE);
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &req->rl_send_sge[0];

	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	req->rl_send_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400560
Chuck Lever655fec62016-09-15 10:57:24 -0400561/* Prepare the Send SGEs. The head and tail iovec, and each entry
562 * in the page list, gets its own SGE.
563 */
564static bool
565rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
566 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
567{
568 unsigned int sge_no, page_base, len, remaining;
569 struct rpcrdma_regbuf *rb = req->rl_sendbuf;
570 struct ib_device *device = ia->ri_device;
571 struct ib_sge *sge = req->rl_send_sge;
572 u32 lkey = ia->ri_pd->local_dma_lkey;
573 struct page *page, **ppages;
Tom Talpeyb38ab402009-03-11 14:37:55 -0400574
Chuck Lever655fec62016-09-15 10:57:24 -0400575 /* The head iovec is straightforward, as it is already
576 * DMA-mapped. Sync the content that has changed.
577 */
578 if (!rpcrdma_dma_map_regbuf(ia, rb))
Chuck Lever857f9ac2017-10-20 10:47:55 -0400579 goto out_regbuf;
Chuck Lever655fec62016-09-15 10:57:24 -0400580 sge_no = 1;
581 sge[sge_no].addr = rdmab_addr(rb);
582 sge[sge_no].length = xdr->head[0].iov_len;
583 sge[sge_no].lkey = rdmab_lkey(rb);
Chuck Lever91a10c52017-04-11 13:23:02 -0400584 ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
Chuck Lever655fec62016-09-15 10:57:24 -0400585 sge[sge_no].length, DMA_TO_DEVICE);
586
587 /* If there is a Read chunk, the page list is being handled
588 * via explicit RDMA, and thus is skipped here. However, the
589 * tail iovec may include an XDR pad for the page list, as
590 * well as additional content, and may not reside in the
591 * same page as the head iovec.
592 */
593 if (rtype == rpcrdma_readch) {
594 len = xdr->tail[0].iov_len;
595
596 /* Do not include the tail if it is only an XDR pad */
597 if (len < 4)
598 goto out;
599
600 page = virt_to_page(xdr->tail[0].iov_base);
Chuck Leverd933cc32017-06-08 11:53:16 -0400601 page_base = offset_in_page(xdr->tail[0].iov_base);
Chuck Lever655fec62016-09-15 10:57:24 -0400602
603 /* If the content in the page list is an odd length,
604 * xdr_write_pages() has added a pad at the beginning
605 * of the tail iovec. Force the tail's non-pad content
606 * to land at the next XDR position in the Send message.
607 */
608 page_base += len & 3;
609 len -= len & 3;
610 goto map_tail;
611 }
612
613 /* If there is a page list present, temporarily DMA map
614 * and prepare an SGE for each page to be sent.
615 */
616 if (xdr->page_len) {
617 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
Chuck Leverd933cc32017-06-08 11:53:16 -0400618 page_base = offset_in_page(xdr->page_base);
Chuck Lever655fec62016-09-15 10:57:24 -0400619 remaining = xdr->page_len;
620 while (remaining) {
621 sge_no++;
622 if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
623 goto out_mapping_overflow;
624
625 len = min_t(u32, PAGE_SIZE - page_base, remaining);
626 sge[sge_no].addr = ib_dma_map_page(device, *ppages,
627 page_base, len,
628 DMA_TO_DEVICE);
629 if (ib_dma_mapping_error(device, sge[sge_no].addr))
630 goto out_mapping_err;
631 sge[sge_no].length = len;
632 sge[sge_no].lkey = lkey;
633
634 req->rl_mapped_sges++;
635 ppages++;
636 remaining -= len;
637 page_base = 0;
Tom Talpeyb38ab402009-03-11 14:37:55 -0400638 }
Tom Talpeyb38ab402009-03-11 14:37:55 -0400639 }
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000640
Chuck Lever655fec62016-09-15 10:57:24 -0400641 /* The tail iovec is not always constructed in the same
642 * page where the head iovec resides (see, for example,
643 * gss_wrap_req_priv). To neatly accommodate that case,
644 * DMA map it separately.
645 */
646 if (xdr->tail[0].iov_len) {
647 page = virt_to_page(xdr->tail[0].iov_base);
Chuck Leverd933cc32017-06-08 11:53:16 -0400648 page_base = offset_in_page(xdr->tail[0].iov_base);
Chuck Lever655fec62016-09-15 10:57:24 -0400649 len = xdr->tail[0].iov_len;
650
651map_tail:
652 sge_no++;
653 sge[sge_no].addr = ib_dma_map_page(device, page,
654 page_base, len,
655 DMA_TO_DEVICE);
656 if (ib_dma_mapping_error(device, sge[sge_no].addr))
657 goto out_mapping_err;
658 sge[sge_no].length = len;
659 sge[sge_no].lkey = lkey;
660 req->rl_mapped_sges++;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400661 }
Chuck Lever655fec62016-09-15 10:57:24 -0400662
663out:
Chuck Leverad99f052017-10-20 10:47:39 -0400664 req->rl_send_wr.num_sge += sge_no;
Chuck Lever655fec62016-09-15 10:57:24 -0400665 return true;
666
Chuck Lever857f9ac2017-10-20 10:47:55 -0400667out_regbuf:
668 pr_err("rpcrdma: failed to DMA map a Send buffer\n");
669 return false;
670
Chuck Lever655fec62016-09-15 10:57:24 -0400671out_mapping_overflow:
Chuck Lever394b2c72017-10-20 10:47:47 -0400672 rpcrdma_unmap_sges(ia, req);
Chuck Lever655fec62016-09-15 10:57:24 -0400673 pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
674 return false;
675
676out_mapping_err:
Chuck Lever394b2c72017-10-20 10:47:47 -0400677 rpcrdma_unmap_sges(ia, req);
Chuck Lever655fec62016-09-15 10:57:24 -0400678 pr_err("rpcrdma: Send mapping error\n");
679 return false;
680}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	req->rl_send_wr.num_sge = 0;
	req->rl_mapped_sges = 0;

	if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
		return -EIO;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
			return -EIO;

	return 0;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if not enough pages are available for on-demand reply buffer,
 *	%-ENOBUFS if no MRs are available to register chunks,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	dprintk("RPC: %5u %s: %s/%s: hdrlen %u rpclen\n",
		rqst->rq_task->tk_pid, __func__,
		transfertypes[rtype], transfertypes[wtype],
		xdr_stream_pos(xdr));

	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;
	return 0;

out_err:
	if (ret != -ENOBUFS) {
		pr_err("rpcrdma: header marshaling failed (%d)\n", ret);
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

/* Caller must guarantee @rep remains stable during this call.
 */
static void
rpcrdma_mark_remote_invalidation(struct list_head *mws,
				 struct rpcrdma_rep *rep)
{
	struct rpcrdma_mw *mw;

	if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
		return;

	list_for_each_entry(mw, mws, mw_list)
		if (mw->mw_handle == rep->rr_inv_rkey) {
			mw->mw_flags = RPCRDMA_MW_F_RI;
			break; /* only one invalidated MR per RPC */
		}
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		xprt_disconnect_done(&r_xprt->rx_xprt);
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	ifdebug(FACILITY) {
		u64 offset;
		u32 handle;

		handle = be32_to_cpup(p++);
		*length = be32_to_cpup(p++);
		xdr_decode_hyper(p, &offset);
		dprintk("RPC: %s: segment %u@0x%016llx:0x%08x\n",
			__func__, *length, (unsigned long long)offset,
			handle);
	} else {
		*length = be32_to_cpup(p + 1);
	}

	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	dprintk("RPC: %s: segcount=%u, %u bytes\n",
		__func__, be32_to_cpup(p), *length);
	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
			rqst->rq_task->tk_pid, __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)));
		break;
	case err_chunk:
		dprintk("RPC: %5u: %s: server reports header decoding error\n",
			rqst->rq_task->tk_pid, __func__);
		break;
	default:
		dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
			rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	unsigned long cwnd;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->recv_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->recv_lock);
	return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badheader:
	dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
		rqst->rq_task->tk_pid, __func__, be32_to_cpu(rep->rr_proc));
	r_xprt->rx_stats.bad_reply_count++;
	status = -EIO;
	goto out;
}

/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	/* Invalidate and unmap the data payloads before waking
	 * the waiting application. This guarantees the memory
	 * regions are properly fenced from the server before the
	 * application accesses the data. It also ensures proper
	 * send flow control: waking the next RPC waits until this
	 * RPC has relinquished all its Send Queue entries.
	 */
	rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
	r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &req->rl_registered);

	rpcrdma_complete_rqst(rep);
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	dprintk("RPC: %s: incoming rep %p\n", __func__, rep);

	if (rep->rr_hdrbuf.head[0].iov_len == 0)
		goto out_badstatus;

	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base);

	/* Fixed transport header fields */
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->recv_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	buf->rb_credits = credits;

	spin_unlock(&xprt->recv_lock);

	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	rep->rr_rqst = rqst;

	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
		__func__, rep, req, be32_to_cpu(rep->rr_xid));

	if (list_empty(&req->rl_registered))
		rpcrdma_complete_rqst(rep);
	else
		queue_work(rpcrdma_receive_wq, &rep->rr_work);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

out_badversion:
	dprintk("RPC: %s: invalid version %d\n",
		__func__, be32_to_cpu(rep->rr_vers));
	goto repost;

/* The RPC transaction has already been terminated, or the header
 * is corrupt.
 */
out_norqst:
	spin_unlock(&xprt->recv_lock);
	dprintk("RPC: %s: no match for incoming xid 0x%08x\n",
		__func__, be32_to_cpu(rep->rr_xid));
	goto repost;

out_shortreply:
	dprintk("RPC: %s: short/invalid reply\n", __func__);

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		rpcrdma_recv_buffer_put(rep);
}