\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
Chuck Lever62b56a62017-10-30 16:22:14 -04002 * Copyright (c) 2014-2017 Oracle. All rights reserved.
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04003 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the BSD-type
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 *
18 * Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials provided
21 * with the distribution.
22 *
23 * Neither the name of the Network Appliance, Inc. nor the names of
24 * its contributors may be used to endorse or promote products
25 * derived from this software without specific prior written
26 * permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41/*
42 * rpc_rdma.c
43 *
44 * This file contains the guts of the RPC RDMA protocol, and
45 * does marshaling/unmarshaling, etc. It is also where interfacing
46 * to the Linux RPC framework lives.
\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -040047 */
48
49#include "xprt_rdma.h"
50
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040051#include <linux/highmem.h>
52
Jeff Laytonf895b252014-11-17 16:58:04 -050053#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040054# define RPCDBG_FACILITY RPCDBG_TRANS
55#endif
56
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040057static const char transfertypes[][12] = {
Chuck Lever94f58c52016-05-02 14:41:30 -040058 "inline", /* no chunks */
59 "read list", /* some argument via rdma read */
60 "*read list", /* entire request via rdma read */
61 "write list", /* some result via rdma write */
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040062 "reply chunk" /* entire reply via rdma write */
63};
Chuck Lever302d3de2016-05-02 14:41:05 -040064
65/* Returns size of largest RPC-over-RDMA header in a Call message
66 *
Chuck Lever94f58c52016-05-02 14:41:30 -040067 * The largest Call header contains a full-size Read list and a
68 * minimal Reply chunk.
Chuck Lever302d3de2016-05-02 14:41:05 -040069 */
70static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
71{
72 unsigned int size;
73
74 /* Fixed header fields and list discriminators */
75 size = RPCRDMA_HDRLEN_MIN;
76
77 /* Maximum Read list size */
78 maxsegs += 2; /* segment for head and tail buffers */
Chuck Lever2232df52017-10-30 16:21:57 -040079 size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
Chuck Lever302d3de2016-05-02 14:41:05 -040080
Chuck Lever94f58c52016-05-02 14:41:30 -040081 /* Minimal Read chunk size */
82 size += sizeof(__be32); /* segment count */
Chuck Lever2232df52017-10-30 16:21:57 -040083 size += rpcrdma_segment_maxsz * sizeof(__be32);
Chuck Lever94f58c52016-05-02 14:41:30 -040084 size += sizeof(__be32); /* list discriminator */
85
Chuck Lever302d3de2016-05-02 14:41:05 -040086 dprintk("RPC: %s: max call header size = %u\n",
87 __func__, size);
88 return size;
89}
90
91/* Returns size of largest RPC-over-RDMA header in a Reply message
92 *
93 * There is only one Write list or one Reply chunk per Reply
94 * message. The larger list is the Write list.
95 */
96static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
97{
98 unsigned int size;
99
100 /* Fixed header fields and list discriminators */
101 size = RPCRDMA_HDRLEN_MIN;
102
103 /* Maximum Write list size */
104 maxsegs += 2; /* segment for head and tail buffers */
105 size = sizeof(__be32); /* segment count */
Chuck Lever2232df52017-10-30 16:21:57 -0400106 size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
Chuck Lever302d3de2016-05-02 14:41:05 -0400107 size += sizeof(__be32); /* list discriminator */
108
109 dprintk("RPC: %s: max reply header size = %u\n",
110 __func__, size);
111 return size;
112}
113
Chuck Lever87cfb9a2016-09-15 10:57:07 -0400114void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
Chuck Lever302d3de2016-05-02 14:41:05 -0400115{
Chuck Lever87cfb9a2016-09-15 10:57:07 -0400116 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
117 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
118 unsigned int maxsegs = ia->ri_max_segs;
119
Chuck Lever302d3de2016-05-02 14:41:05 -0400120 ia->ri_max_inline_write = cdata->inline_wsize -
121 rpcrdma_max_call_header_size(maxsegs);
122 ia->ri_max_inline_read = cdata->inline_rsize -
123 rpcrdma_max_reply_header_size(maxsegs);
124}
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400125
Chuck Lever5457ced2015-08-03 13:03:49 -0400126/* The client can send a request inline as long as the RPCRDMA header
127 * plus the RPC call fit under the transport's inline limit. If the
128 * combined call message size exceeds that limit, the client must use
Chuck Lever16f906d2017-02-08 17:00:10 -0500129 * a Read chunk for this operation.
130 *
131 * A Read chunk is also required if sending the RPC call inline would
132 * exceed this device's max_sge limit.
Chuck Lever5457ced2015-08-03 13:03:49 -0400133 */
Chuck Lever302d3de2016-05-02 14:41:05 -0400134static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
135 struct rpc_rqst *rqst)
Chuck Lever5457ced2015-08-03 13:03:49 -0400136{
Chuck Lever16f906d2017-02-08 17:00:10 -0500137 struct xdr_buf *xdr = &rqst->rq_snd_buf;
138 unsigned int count, remaining, offset;
Chuck Lever5457ced2015-08-03 13:03:49 -0400139
Chuck Lever16f906d2017-02-08 17:00:10 -0500140 if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
141 return false;
142
143 if (xdr->page_len) {
144 remaining = xdr->page_len;
Chuck Leverd933cc32017-06-08 11:53:16 -0400145 offset = offset_in_page(xdr->page_base);
Chuck Lever16f906d2017-02-08 17:00:10 -0500146 count = 0;
147 while (remaining) {
148 remaining -= min_t(unsigned int,
149 PAGE_SIZE - offset, remaining);
150 offset = 0;
151 if (++count > r_xprt->rx_ia.ri_max_send_sges)
152 return false;
153 }
154 }
155
156 return true;
Chuck Lever5457ced2015-08-03 13:03:49 -0400157}
158
159/* The client can't know how large the actual reply will be. Thus it
160 * plans for the largest possible reply for that particular ULP
161 * operation. If the maximum combined reply message size exceeds that
162 * limit, the client must provide a write list or a reply chunk for
163 * this request.
164 */
Chuck Lever302d3de2016-05-02 14:41:05 -0400165static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
166 struct rpc_rqst *rqst)
Chuck Lever5457ced2015-08-03 13:03:49 -0400167{
Chuck Lever302d3de2016-05-02 14:41:05 -0400168 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
Chuck Lever5457ced2015-08-03 13:03:49 -0400169
Chuck Lever302d3de2016-05-02 14:41:05 -0400170 return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
Chuck Lever5457ced2015-08-03 13:03:49 -0400171}
172
Chuck Lever28d9d562017-08-14 15:38:22 -0400173/* Split @vec on page boundaries into SGEs. FMR registers pages, not
174 * a byte range. Other modes coalesce these SGEs into a single MR
175 * when they can.
176 *
177 * Returns pointer to next available SGE, and bumps the total number
178 * of SGEs consumed.
Chuck Lever821c7912016-03-04 11:27:52 -0500179 */
Chuck Lever28d9d562017-08-14 15:38:22 -0400180static struct rpcrdma_mr_seg *
181rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
182 unsigned int *n)
Chuck Lever821c7912016-03-04 11:27:52 -0500183{
Chuck Lever28d9d562017-08-14 15:38:22 -0400184 u32 remaining, page_offset;
Chuck Lever821c7912016-03-04 11:27:52 -0500185 char *base;
186
187 base = vec->iov_base;
188 page_offset = offset_in_page(base);
189 remaining = vec->iov_len;
Chuck Lever28d9d562017-08-14 15:38:22 -0400190 while (remaining) {
191 seg->mr_page = NULL;
192 seg->mr_offset = base;
193 seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
194 remaining -= seg->mr_len;
195 base += seg->mr_len;
196 ++seg;
197 ++(*n);
Chuck Lever821c7912016-03-04 11:27:52 -0500198 page_offset = 0;
199 }
Chuck Lever28d9d562017-08-14 15:38:22 -0400200 return seg;
Chuck Lever821c7912016-03-04 11:27:52 -0500201}
202
Chuck Lever28d9d562017-08-14 15:38:22 -0400203/* Convert @xdrbuf into SGEs no larger than a page each. As they
204 * are registered, these SGEs are then coalesced into RDMA segments
205 * when the selected memreg mode supports it.
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400206 *
Chuck Lever28d9d562017-08-14 15:38:22 -0400207 * Returns positive number of SGEs consumed, or a negative errno.
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400208 */
209
210static int
Chuck Leverb5f0afb2017-02-08 16:59:54 -0500211rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
212 unsigned int pos, enum rpcrdma_chunktype type,
213 struct rpcrdma_mr_seg *seg)
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400214{
Chuck Lever28d9d562017-08-14 15:38:22 -0400215 unsigned long page_base;
216 unsigned int len, n;
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000217 struct page **ppages;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400218
Chuck Lever5ab81422016-06-29 13:54:25 -0400219 n = 0;
Chuck Lever28d9d562017-08-14 15:38:22 -0400220 if (pos == 0)
221 seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400222
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000223 len = xdrbuf->page_len;
224 ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
Chuck Leverd933cc32017-06-08 11:53:16 -0400225 page_base = offset_in_page(xdrbuf->page_base);
Chuck Lever28d9d562017-08-14 15:38:22 -0400226 while (len) {
227 if (unlikely(!*ppages)) {
228 /* XXX: Certain upper layer operations do
229 * not provide receive buffer pages.
230 */
231 *ppages = alloc_page(GFP_ATOMIC);
232 if (!*ppages)
Chuck Lever7a89f9c2016-06-29 13:53:43 -0400233 return -EAGAIN;
Shirley Ma196c6992014-05-28 10:34:24 -0400234 }
Chuck Lever28d9d562017-08-14 15:38:22 -0400235 seg->mr_page = *ppages;
236 seg->mr_offset = (char *)page_base;
237 seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
238 len -= seg->mr_len;
239 ++ppages;
240 ++seg;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400241 ++n;
Chuck Lever28d9d562017-08-14 15:38:22 -0400242 page_base = 0;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400243 }
244
Chuck Lever24abdf12017-02-08 16:59:46 -0500245 /* When encoding a Read chunk, the tail iovec contains an
246 * XDR pad and may be omitted.
247 */
Chuck Leverb5f0afb2017-02-08 16:59:54 -0500248 if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
Chuck Lever28d9d562017-08-14 15:38:22 -0400249 goto out;
Chuck Lever677eb172015-08-03 13:04:17 -0400250
Chuck Leverb5f0afb2017-02-08 16:59:54 -0500251 /* When encoding a Write chunk, some servers need to see an
252 * extra segment for non-XDR-aligned Write chunks. The upper
253 * layer provides space in the tail iovec that may be used
254 * for this purpose.
Chuck Leverc8b920b2016-09-15 10:57:16 -0400255 */
Chuck Leverb5f0afb2017-02-08 16:59:54 -0500256 if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
Chuck Lever28d9d562017-08-14 15:38:22 -0400257 goto out;
Chuck Leverc8b920b2016-09-15 10:57:16 -0400258
Chuck Lever28d9d562017-08-14 15:38:22 -0400259 if (xdrbuf->tail[0].iov_len)
260 seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400261
Chuck Lever28d9d562017-08-14 15:38:22 -0400262out:
263 if (unlikely(n > RPCRDMA_MAX_SEGS))
264 return -EIO;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400265 return n;
266}
267
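/* Each chunk list in the transport header begins with a one-word XDR
 * discriminator: xdr_one marks an item as present, xdr_zero marks an
 * empty or terminated list.
 */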
static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

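/* Emit one RDMA segment at @iptr: the MR's handle, length, and offset.
 */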
static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
	*iptr++ = cpu_to_be32(mw->mw_handle);
	*iptr++ = cpu_to_be32(mw->mw_length);
	xdr_encode_hyper(iptr, mw->mw_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mw);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mw);
	return 0;
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   false, &mw);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_read_segment(xdr, mw, pos) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__, pos,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mw->mw_nents;
	} while (nsegs);

	return 0;
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mw);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_rdma_segment(xdr, mw) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		nsegs -= mw->mw_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mw);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_rdma_segment(xdr, mw) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		nsegs -= mw->mw_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/**
 * rpcrdma_unmap_sendctx - DMA-unmap Send buffers
 * @sc: sendctx containing SGEs to unmap
 *
 */
void
rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_ia *ia = &sc->sc_xprt->rx_ia;
	struct ib_sge *sge;
	unsigned int count;

	dprintk("RPC:       %s: unmapping %u sges for sc=%p\n",
		__func__, sc->sc_unmap_count, sc);

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	sge = &sc->sc_sges[2];
	for (count = sc->sc_unmap_count; count; ++sge, --count)
		ib_dma_unmap_page(ia->ri_device,
				  sge->addr, sge->length, DMA_TO_DEVICE);

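	/* Paired with the wait in rpcrdma_release_rqst(): notify a reply
	 * handler that is waiting for these Send SGEs to be unmapped.
	 */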
	if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &sc->sc_req->rl_flags)) {
		smp_mb__after_atomic();
		wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
	}
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge = sc->sc_sges;
	u32 lkey = ia->ri_pd->local_dma_lkey;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
							   page_base, len,
							   DMA_TO_DEVICE);
			if (ib_dma_mapping_error(device, sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = lkey;

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr = ib_dma_map_page(device, page,
						   page_base, len,
						   DMA_TO_DEVICE);
		if (ib_dma_mapping_error(device, sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = lkey;
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: Send mapping error\n");
	return false;
}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
	if (!req->rl_sendctx)
		return -ENOBUFS;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

	if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
		return -EIO;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
			return -EIO;

	return 0;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if not enough pages are available for on-demand reply buffer,
 *	%-ENOBUFS if no MRs are available to register chunks,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	dprintk("RPC: %5u %s: %s/%s: hdrlen %u rpclen\n",
		rqst->rq_task->tk_pid, __func__,
		transfertypes[rtype], transfertypes[wtype],
		xdr_stream_pos(xdr));

	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;
	return 0;

out_err:
	if (ret != -ENOBUFS) {
		pr_err("rpcrdma: header marshaling failed (%d)\n", ret);
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

/* Caller must guarantee @rep remains stable during this call.
 */
static void
rpcrdma_mark_remote_invalidation(struct list_head *mws,
				 struct rpcrdma_rep *rep)
{
	struct rpcrdma_mw *mw;

	if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
		return;

	list_for_each_entry(mw, mws, mw_list)
		if (mw->mw_handle == rep->rr_inv_rkey) {
			mw->mw_flags = RPCRDMA_MW_F_RI;
			break; /* only one invalidated MR per RPC */
		}
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		xprt_disconnect_done(&r_xprt->rx_xprt);
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

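/* Decode one RDMA segment (handle, length, offset) and return the
 * segment's byte count in @length.
 */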
static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	ifdebug(FACILITY) {
		u64 offset;
		u32 handle;

		handle = be32_to_cpup(p++);
		*length = be32_to_cpup(p++);
		xdr_decode_hyper(p, &offset);
		dprintk("RPC:       %s: segment %u@0x%016llx:0x%08x\n",
			__func__, *length, (unsigned long long)offset,
			handle);
	} else {
		*length = be32_to_cpup(p + 1);
	}

	return 0;
}

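/* Decode a counted array of RDMA segments and return the chunk's
 * total byte count in @length.
 */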
static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	dprintk("RPC:       %s: segcount=%u, %u bytes\n",
		__func__, be32_to_cpup(p), *length);
	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

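/* The Reply chunk is optional: when it is present, sum its segment
 * lengths into @length.
 */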
1149static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
1150{
1151 __be32 *p;
1152
1153 p = xdr_inline_decode(xdr, sizeof(*p));
1154 if (unlikely(!p))
1155 return -EIO;
1156
1157 *length = 0;
1158 if (*p != xdr_zero)
1159 if (decode_write_chunk(xdr, length))
1160 return -EIO;
1161 return 0;
1162}
1163
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001164static int
1165rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1166 struct rpc_rqst *rqst)
1167{
1168 struct xdr_stream *xdr = &rep->rr_stream;
Chuck Lever264b0cd2017-08-03 14:30:27 -04001169 u32 writelist, replychunk, rpclen;
1170 char *base;
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001171
Chuck Lever264b0cd2017-08-03 14:30:27 -04001172 /* Decode the chunk lists */
1173 if (decode_read_list(xdr))
1174 return -EIO;
1175 if (decode_write_list(xdr, &writelist))
1176 return -EIO;
1177 if (decode_reply_chunk(xdr, &replychunk))
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001178 return -EIO;
1179
Chuck Lever264b0cd2017-08-03 14:30:27 -04001180 /* RDMA_MSG sanity checks */
1181 if (unlikely(replychunk))
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001182 return -EIO;
1183
Chuck Lever264b0cd2017-08-03 14:30:27 -04001184 /* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
1185 base = (char *)xdr_inline_decode(xdr, 0);
1186 rpclen = xdr_stream_remaining(xdr);
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001187 r_xprt->rx_stats.fixup_copy_count +=
Chuck Lever264b0cd2017-08-03 14:30:27 -04001188 rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001189
Chuck Lever264b0cd2017-08-03 14:30:27 -04001190 r_xprt->rx_stats.total_rdma_reply += writelist;
1191 return rpclen + xdr_align_size(writelist);
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001192}
1193
1194static noinline int
1195rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1196{
1197 struct xdr_stream *xdr = &rep->rr_stream;
Chuck Lever264b0cd2017-08-03 14:30:27 -04001198 u32 writelist, replychunk;
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001199
Chuck Lever264b0cd2017-08-03 14:30:27 -04001200 /* Decode the chunk lists */
1201 if (decode_read_list(xdr))
1202 return -EIO;
1203 if (decode_write_list(xdr, &writelist))
1204 return -EIO;
1205 if (decode_reply_chunk(xdr, &replychunk))
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001206 return -EIO;
1207
Chuck Lever264b0cd2017-08-03 14:30:27 -04001208 /* RDMA_NOMSG sanity checks */
1209 if (unlikely(writelist))
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001210 return -EIO;
Chuck Lever264b0cd2017-08-03 14:30:27 -04001211 if (unlikely(!replychunk))
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001212 return -EIO;
1213
Chuck Lever264b0cd2017-08-03 14:30:27 -04001214 /* Reply chunk buffer already is the reply vector */
1215 r_xprt->rx_stats.total_rdma_reply += replychunk;
1216 return replychunk;
Chuck Lever07ff2dd2017-08-03 14:30:19 -04001217}
1218
1219static noinline int
1220rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1221 struct rpc_rqst *rqst)
1222{
1223 struct xdr_stream *xdr = &rep->rr_stream;
1224 __be32 *p;
1225
1226 p = xdr_inline_decode(xdr, sizeof(*p));
1227 if (unlikely(!p))
1228 return -EIO;
1229
1230 switch (*p) {
1231 case err_vers:
1232 p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1233 if (!p)
1234 break;
1235 dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
1236 rqst->rq_task->tk_pid, __func__,
1237 be32_to_cpup(p), be32_to_cpu(*(p + 1)));
1238 break;
1239 case err_chunk:
1240 dprintk("RPC: %5u: %s: server reports header decoding error\n",
1241 rqst->rq_task->tk_pid, __func__);
1242 break;
1243 default:
1244 dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
1245 rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
1246 }
1247
1248 r_xprt->rx_stats.bad_reply_count++;
1249 return -EREMOTEIO;
1250}
1251
Chuck Levere1352c92017-10-16 15:01:22 -04001252/* Perform XID lookup, reconstruction of the RPC reply, and
1253 * RPC completion while holding the transport lock to ensure
1254 * the rep, rqst, and rq_task pointers remain stable.
1255 */
1256void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
1257{
1258 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1259 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1260 struct rpc_rqst *rqst = rep->rr_rqst;
1261 unsigned long cwnd;
1262 int status;
1263
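	/* A reply means the connection is working; clear any
	 * reconnect backoff delay.
	 */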
1264 xprt->reestablish_timeout = 0;
1265
1266 switch (rep->rr_proc) {
1267 case rdma_msg:
1268 status = rpcrdma_decode_msg(r_xprt, rep, rqst);
1269 break;
1270 case rdma_nomsg:
1271 status = rpcrdma_decode_nomsg(r_xprt, rep);
1272 break;
1273 case rdma_error:
1274 status = rpcrdma_decode_error(r_xprt, rep, rqst);
1275 break;
1276 default:
1277 status = -EIO;
1278 }
1279 if (status < 0)
1280 goto out_badheader;
1281
1282out:
1283 spin_lock(&xprt->recv_lock);
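	/* Refresh the congestion window from the most recently
	 * granted credits; if it grew, queued requests may now
	 * be transmitted.
	 */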
1284 cwnd = xprt->cwnd;
Chuck Leverbe798f92017-10-16 15:01:39 -04001285 xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
Chuck Levere1352c92017-10-16 15:01:22 -04001286 if (xprt->cwnd > cwnd)
1287 xprt_release_rqst_cong(rqst->rq_task);
1288
1289 xprt_complete_rqst(rqst->rq_task, status);
1290 xprt_unpin_rqst(rqst);
1291 spin_unlock(&xprt->recv_lock);
1292 return;
1293
1294/* If the incoming reply terminated a pending RPC, the next
1295 * RPC call will post a replacement receive buffer as it is
1296 * being marshaled.
1297 */
1298out_badheader:
1299 dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
1300 rqst->rq_task->tk_pid, __func__, be32_to_cpu(rep->rr_proc));
1301 r_xprt->rx_stats.bad_reply_count++;
1302 status = -EIO;
1303 goto out;
1304}
1305
Chuck Lever0ba6f372017-10-20 10:48:28 -04001306void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
1307{
1308 /* Invalidate and unmap the data payloads before waking
1309 * the waiting application. This guarantees the memory
1310 * regions are properly fenced from the server before the
1311 * application accesses the data. It also ensures proper
1312 * send flow control: waking the next RPC waits until this
1313 * RPC has relinquished all its Send Queue entries.
1314 */
1315 if (!list_empty(&req->rl_registered))
1316 r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
1317 &req->rl_registered);
Chuck Lever01bb35c2017-10-20 10:48:36 -04001318
1319 /* Ensure that any DMA mapped pages associated with
1320 * the Send of the RPC Call have been unmapped before
1321 * allowing the RPC to complete. This protects argument
1322 * memory not controlled by the RPC client from being
1323 * re-used before we're done with it.
1324 */
1325 if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
1326 r_xprt->rx_stats.reply_waits_for_send++;
1327 out_of_line_wait_on_bit(&req->rl_flags,
1328 RPCRDMA_REQ_F_TX_RESOURCES,
1329 bit_wait,
1330 TASK_UNINTERRUPTIBLE);
1331 }
Chuck Lever0ba6f372017-10-20 10:48:28 -04001332}
1333
Chuck Leverd8f532d2017-10-16 15:01:30 -04001334/* Reply handling runs in the poll worker thread. Anything that
1335 * might wait is deferred to a separate workqueue.
1336 */
1337void rpcrdma_deferred_completion(struct work_struct *work)
1338{
1339 struct rpcrdma_rep *rep =
1340 container_of(work, struct rpcrdma_rep, rr_work);
1341 struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
Chuck Leverd8f532d2017-10-16 15:01:30 -04001342
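	/* Note which MRs the responder has already invalidated
	 * (Remote Invalidation) so local invalidation can skip them.
	 */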
Chuck Leverd8f532d2017-10-16 15:01:30 -04001343 rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
Chuck Lever0ba6f372017-10-20 10:48:28 -04001344 rpcrdma_release_rqst(rep->rr_rxprt, req);
Chuck Leverd8f532d2017-10-16 15:01:30 -04001345 rpcrdma_complete_rqst(rep);
1346}
1347
Chuck Leverfe97b472015-10-24 17:27:10 -04001348/* Process received RPC/RDMA messages.
1349 *
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001350 * Errors must result in the RPC task either being awakened, or
1351 * allowed to time out, to discover the errors at that time.
1352 */
Chuck Leverd8f532d2017-10-16 15:01:30 -04001353void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001354{
Chuck Lever431af642017-06-08 11:52:20 -04001355 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
Chuck Lever431af642017-06-08 11:52:20 -04001356 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
Chuck Leverbe798f92017-10-16 15:01:39 -04001357 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001358 struct rpcrdma_req *req;
1359 struct rpc_rqst *rqst;
Chuck Leverbe798f92017-10-16 15:01:39 -04001360 u32 credits;
Chuck Lever5381e0e2017-10-16 15:01:14 -04001361 __be32 *p;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001362
Chuck Leverb0e178a2015-10-24 17:26:54 -04001363 dprintk("RPC: %s: incoming rep %p\n", __func__, rep);
1364
Chuck Levere2a67192017-08-03 14:30:44 -04001365 if (rep->rr_hdrbuf.head[0].iov_len == 0)
Chuck Leverb0e178a2015-10-24 17:26:54 -04001366 goto out_badstatus;
Chuck Leverb0e178a2015-10-24 17:26:54 -04001367
Chuck Lever5381e0e2017-10-16 15:01:14 -04001368 xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
Chuck Lever96f87782017-08-03 14:30:03 -04001369 rep->rr_hdrbuf.head[0].iov_base);
1370
1371 /* Fixed transport header fields */
Chuck Lever5381e0e2017-10-16 15:01:14 -04001372 p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
Chuck Lever96f87782017-08-03 14:30:03 -04001373 if (unlikely(!p))
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001374 goto out_shortreply;
Chuck Lever5381e0e2017-10-16 15:01:14 -04001375 rep->rr_xid = *p++;
1376 rep->rr_vers = *p++;
Chuck Leverbe798f92017-10-16 15:01:39 -04001377 credits = be32_to_cpu(*p++);
Chuck Lever5381e0e2017-10-16 15:01:14 -04001378 rep->rr_proc = *p++;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001379
Chuck Lever5381e0e2017-10-16 15:01:14 -04001380 if (rep->rr_vers != rpcrdma_version)
Chuck Lever61433af2017-10-16 15:01:06 -04001381 goto out_badversion;
1382
Chuck Lever5381e0e2017-10-16 15:01:14 -04001383 if (rpcrdma_is_bcall(r_xprt, rep))
Chuck Lever41c8f702017-08-03 14:30:11 -04001384 return;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001385
Chuck Leverfe97b472015-10-24 17:27:10 -04001386 /* Match incoming rpcrdma_rep to an rpcrdma_req to
1387 * get context for handling any incoming chunks.
1388 */
Chuck Lever9590d082017-08-23 17:05:58 -04001389 spin_lock(&xprt->recv_lock);
Chuck Lever5381e0e2017-10-16 15:01:14 -04001390 rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
Chuck Lever9590d082017-08-23 17:05:58 -04001391 if (!rqst)
1392 goto out_norqst;
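	/* Pin rqst so it cannot be retired while the reply is
	 * processed outside the recv_lock.
	 */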
1393 xprt_pin_rqst(rqst);
Chuck Leverbe798f92017-10-16 15:01:39 -04001394
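	/* Clamp the granted credit value before it is used to
	 * refresh the congestion window.
	 */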
1395 if (credits == 0)
1396 credits = 1; /* don't deadlock */
1397 else if (credits > buf->rb_max_requests)
1398 credits = buf->rb_max_requests;
1399 buf->rb_credits = credits;
1400
Chuck Lever9590d082017-08-23 17:05:58 -04001401 spin_unlock(&xprt->recv_lock);
Chuck Leverbe798f92017-10-16 15:01:39 -04001402
Chuck Lever9590d082017-08-23 17:05:58 -04001403 req = rpcr_to_rdmar(rqst);
Chuck Lever4b196dc62017-06-08 11:51:56 -04001404 req->rl_reply = rep;
Chuck Levere1352c92017-10-16 15:01:22 -04001405 rep->rr_rqst = rqst;
Chuck Lever0ba6f372017-10-20 10:48:28 -04001406 clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
Chuck Lever431af642017-06-08 11:52:20 -04001407
Chuck Leveraf0f16e2016-03-04 11:27:43 -05001408 dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
Chuck Lever5381e0e2017-10-16 15:01:14 -04001409 __func__, rep, req, be32_to_cpu(rep->rr_xid));
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001410
Chuck Leverccede752017-12-04 14:04:04 -05001411 queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
Chuck Leverb0e178a2015-10-24 17:26:54 -04001412 return;
1413
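/* No usable transport header was received. Release the rep; if the
 * connection was up, mark it as failed so it can be re-established.
 */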
1414out_badstatus:
1415 rpcrdma_recv_buffer_put(rep);
1416 if (r_xprt->rx_ep.rep_connected == 1) {
1417 r_xprt->rx_ep.rep_connected = -EIO;
1418 rpcrdma_conn_func(&r_xprt->rx_ep);
1419 }
1420 return;
1421
Chuck Lever61433af2017-10-16 15:01:06 -04001422out_badversion:
1423 dprintk("RPC: %s: invalid version %d\n",
Chuck Lever5381e0e2017-10-16 15:01:14 -04001424 __func__, be32_to_cpu(rep->rr_vers));
Chuck Lever61433af2017-10-16 15:01:06 -04001425 goto repost;
1426
Chuck Levere1352c92017-10-16 15:01:22 -04001427/* The RPC transaction has already been terminated, or the header
1428 * is corrupt.
Chuck Lever59aa1f92016-03-04 11:28:18 -05001429 */
Chuck Lever431af642017-06-08 11:52:20 -04001430out_norqst:
Trond Myklebustce7c2522017-08-16 15:30:35 -04001431 spin_unlock(&xprt->recv_lock);
Chuck Lever96f87782017-08-03 14:30:03 -04001432 dprintk("RPC: %s: no match for incoming xid 0x%08x\n",
Chuck Lever5381e0e2017-10-16 15:01:14 -04001433 __func__, be32_to_cpu(rep->rr_xid));
Chuck Leverb0e178a2015-10-24 17:26:54 -04001434 goto repost;
1435
Chuck Lever9590d082017-08-23 17:05:58 -04001436out_shortreply:
1437 dprintk("RPC: %s: short/invalid reply\n", __func__);
Chuck Leverb0e178a2015-10-24 17:26:54 -04001438
Chuck Lever431af642017-06-08 11:52:20 -04001439/* If no pending RPC transaction was matched, post a replacement
1440 * receive buffer before returning.
1441 */
Chuck Leverb0e178a2015-10-24 17:26:54 -04001442repost:
1443 r_xprt->rx_stats.bad_reply_count++;
Chuck Leverb1573802016-09-15 10:56:35 -04001444 if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
Chuck Leverb0e178a2015-10-24 17:26:54 -04001445 rpcrdma_recv_buffer_put(rep);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04001446}