\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04002 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40/*
41 * rpc_rdma.c
42 *
43 * This file contains the guts of the RPC RDMA protocol, and
44 * does marshaling/unmarshaling, etc. It is also where interfacing
45 * to the Linux RPC framework lives.
\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -040046 */
47
48#include "xprt_rdma.h"
49
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040050#include <linux/highmem.h>
51
Jeff Laytonf895b252014-11-17 16:58:04 -050052#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040053# define RPCDBG_FACILITY RPCDBG_TRANS
54#endif
55
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040056static const char transfertypes[][12] = {
Chuck Lever94f58c52016-05-02 14:41:30 -040057 "inline", /* no chunks */
58 "read list", /* some argument via rdma read */
59 "*read list", /* entire request via rdma read */
60 "write list", /* some result via rdma write */
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040061 "reply chunk" /* entire reply via rdma write */
62};
Chuck Lever302d3de2016-05-02 14:41:05 -040063
64/* Returns size of largest RPC-over-RDMA header in a Call message
65 *
Chuck Lever94f58c52016-05-02 14:41:30 -040066 * The largest Call header contains a full-size Read list and a
67 * minimal Reply chunk.
Chuck Lever302d3de2016-05-02 14:41:05 -040068 */
69static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
70{
71 unsigned int size;
72
73 /* Fixed header fields and list discriminators */
74 size = RPCRDMA_HDRLEN_MIN;
75
76 /* Maximum Read list size */
77 maxsegs += 2; /* segment for head and tail buffers */
78 size = maxsegs * sizeof(struct rpcrdma_read_chunk);
79
Chuck Lever94f58c52016-05-02 14:41:30 -040080 /* Minimal Read chunk size */
81 size += sizeof(__be32); /* segment count */
82 size += sizeof(struct rpcrdma_segment);
83 size += sizeof(__be32); /* list discriminator */
84
Chuck Lever302d3de2016-05-02 14:41:05 -040085 dprintk("RPC: %s: max call header size = %u\n",
86 __func__, size);
87 return size;
88}
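
/* A worked example (illustrative only, using assumed sizes): if
 * RPCRDMA_HDRLEN_MIN is 28 bytes, struct rpcrdma_read_chunk encodes to
 * 24 bytes, and struct rpcrdma_segment to 16 bytes, then a device that
 * supports 8 data segments yields a Read list term of (8 + 2) * 24 =
 * 240 bytes, plus 4 + 16 + 4 = 24 bytes for the minimal chunk and the
 * closing discriminator, or 264 bytes of worst-case Call header.
 */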

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size = sizeof(__be32);		/* segment count */
	size += maxsegs * sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * the read chunk list for this operation.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_snd_buf.len <= ia->ri_max_inline_write;
}

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split "vec" on page boundaries into segments. FMR registers pages,
 * not a byte range. Other modes coalesce these segments into a single
 * MR when they can.
 */
static int
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
{
	size_t page_offset;
	u32 remaining;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining && n < RPCRDMA_MAX_SEGS) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg[n].mr_len;
		base += seg[n].mr_len;
		++n;
		page_offset = 0;
	}
	return n;
}
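
/* For example (illustrative, assuming 4 KiB pages): a kvec whose
 * iov_base starts 3000 bytes into a page and whose iov_len is 2000
 * bytes becomes two segments, one covering the final 1096 bytes of
 * the first page and one covering the next 904 bytes.
 */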

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	int len, n, p, page_base;
	struct page **ppages;

	n = 0;
	if (pos == 0) {
		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < RPCRDMA_MAX_SEGS) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -EAGAIN;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			goto out_overflow;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == RPCRDMA_MAX_SEGS)
		goto out_overflow;

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		return n;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	return n;

out_overflow:
	pr_err("rpcrdma: segment array overflow\n");
	return -EIO;
}

static inline __be32 *
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
	*iptr++ = cpu_to_be32(mw->mw_handle);
	*iptr++ = cpu_to_be32(mw->mw_length);
	return xdr_encode_hyper(iptr, mw->mw_offset);
}
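
/* On the wire each segment is four XDR words: Handle32, Length32, and
 * a 64-bit Offset written as two words by xdr_encode_hyper(). The
 * returned pointer addresses the word just past the encoded segment.
 */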

/* XDR-encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Read list, or an error pointer.
 */
static __be32 *
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_req *req, struct rpc_rqst *rqst,
			 __be32 *iptr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int pos;
	int n, nsegs;

	if (rtype == rpcrdma_noch) {
		*iptr++ = xdr_zero;	/* item not present */
		return iptr;
	}

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 false, &mw);
		if (n < 0)
			return ERR_PTR(n);
		list_add(&mw->mw_list, &req->rl_registered);

		*iptr++ = xdr_one;	/* item present */

		/* All read segments in this chunk
		 * have the same "position".
		 */
		*iptr++ = cpu_to_be32(pos);
		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__, pos,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.read_chunk_count++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Finish Read list */
	*iptr++ = xdr_zero;	/* Next item not present */
	return iptr;
}
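
/* An illustrative encoding (a sketch, not taken from a capture): a Read
 * chunk registered as two MRs at position 128 is emitted as
 *
 *    1, 128, H0, L0, O0,  1, 128, H1, L1, O1,  0
 *
 * that is, each segment repeats the item discriminator and the position
 * before its HLOO triplet.
 */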

/* XDR-encode the Write list. Supports encoding a list containing
 * one array of plain segments that belong to a single write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Write list, or an error pointer.
 */
static __be32 *
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, __be32 *iptr,
			  enum rpcrdma_chunktype wtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech) {
		*iptr++ = xdr_zero;	/* no Write list present */
		return iptr;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	*iptr++ = xdr_one;	/* Write list present */
	segcount = iptr++;	/* save location of segment count */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return ERR_PTR(n);
		list_add(&mw->mw_list, &req->rl_registered);

		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	/* Finish Write list */
	*iptr++ = xdr_zero;	/* Next item not present */
	return iptr;
}

/* XDR-encode the Reply chunk. Supports encoding an array of plain
 * segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Reply chunk, or an error pointer.
 */
static __be32 *
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
			   struct rpcrdma_req *req, struct rpc_rqst *rqst,
			   __be32 *iptr, enum rpcrdma_chunktype wtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych) {
		*iptr++ = xdr_zero;	/* no Reply chunk present */
		return iptr;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	*iptr++ = xdr_one;	/* Reply chunk present */
	segcount = iptr++;	/* save location of segment count */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return ERR_PTR(n);
		list_add(&mw->mw_list, &req->rl_registered);

		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return iptr;
}

/* Prepare the RPC-over-RDMA header SGE.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &req->rl_send_sge[0];

	if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
		if (!__rpcrdma_dma_map_regbuf(ia, rb))
			return false;
		sge->addr = rdmab_addr(rb);
		sge->lkey = rdmab_lkey(rb);
	}
	sge->length = len;

	ib_dma_sync_single_for_device(ia->ri_device, sge->addr,
				      sge->length, DMA_TO_DEVICE);
	req->rl_send_wr.num_sge++;
	return true;
}
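
/* In this scheme, rl_send_sge[0] always carries the RPC-over-RDMA
 * header; rpcrdma_prepare_msg_sges() below places the RPC message
 * itself starting at rl_send_sge[1].
 */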
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400482
Chuck Lever655fec62016-09-15 10:57:24 -0400483/* Prepare the Send SGEs. The head and tail iovec, and each entry
484 * in the page list, gets its own SGE.
485 */
486static bool
487rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
488 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
489{
490 unsigned int sge_no, page_base, len, remaining;
491 struct rpcrdma_regbuf *rb = req->rl_sendbuf;
492 struct ib_device *device = ia->ri_device;
493 struct ib_sge *sge = req->rl_send_sge;
494 u32 lkey = ia->ri_pd->local_dma_lkey;
495 struct page *page, **ppages;
Tom Talpeyb38ab402009-03-11 14:37:55 -0400496
Chuck Lever655fec62016-09-15 10:57:24 -0400497 /* The head iovec is straightforward, as it is already
498 * DMA-mapped. Sync the content that has changed.
499 */
500 if (!rpcrdma_dma_map_regbuf(ia, rb))
501 return false;
502 sge_no = 1;
503 sge[sge_no].addr = rdmab_addr(rb);
504 sge[sge_no].length = xdr->head[0].iov_len;
505 sge[sge_no].lkey = rdmab_lkey(rb);
506 ib_dma_sync_single_for_device(device, sge[sge_no].addr,
507 sge[sge_no].length, DMA_TO_DEVICE);
508
509 /* If there is a Read chunk, the page list is being handled
510 * via explicit RDMA, and thus is skipped here. However, the
511 * tail iovec may include an XDR pad for the page list, as
512 * well as additional content, and may not reside in the
513 * same page as the head iovec.
514 */
515 if (rtype == rpcrdma_readch) {
516 len = xdr->tail[0].iov_len;
517
518 /* Do not include the tail if it is only an XDR pad */
519 if (len < 4)
520 goto out;
521
522 page = virt_to_page(xdr->tail[0].iov_base);
523 page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
524
525 /* If the content in the page list is an odd length,
526 * xdr_write_pages() has added a pad at the beginning
527 * of the tail iovec. Force the tail's non-pad content
528 * to land at the next XDR position in the Send message.
529 */
530 page_base += len & 3;
531 len -= len & 3;
532 goto map_tail;
533 }
534
535 /* If there is a page list present, temporarily DMA map
536 * and prepare an SGE for each page to be sent.
537 */
538 if (xdr->page_len) {
539 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
540 page_base = xdr->page_base & ~PAGE_MASK;
541 remaining = xdr->page_len;
542 while (remaining) {
543 sge_no++;
544 if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
545 goto out_mapping_overflow;
546
547 len = min_t(u32, PAGE_SIZE - page_base, remaining);
548 sge[sge_no].addr = ib_dma_map_page(device, *ppages,
549 page_base, len,
550 DMA_TO_DEVICE);
551 if (ib_dma_mapping_error(device, sge[sge_no].addr))
552 goto out_mapping_err;
553 sge[sge_no].length = len;
554 sge[sge_no].lkey = lkey;
555
556 req->rl_mapped_sges++;
557 ppages++;
558 remaining -= len;
559 page_base = 0;
Tom Talpeyb38ab402009-03-11 14:37:55 -0400560 }
Tom Talpeyb38ab402009-03-11 14:37:55 -0400561 }
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000562
Chuck Lever655fec62016-09-15 10:57:24 -0400563 /* The tail iovec is not always constructed in the same
564 * page where the head iovec resides (see, for example,
565 * gss_wrap_req_priv). To neatly accommodate that case,
566 * DMA map it separately.
567 */
568 if (xdr->tail[0].iov_len) {
569 page = virt_to_page(xdr->tail[0].iov_base);
570 page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
571 len = xdr->tail[0].iov_len;
572
573map_tail:
574 sge_no++;
575 sge[sge_no].addr = ib_dma_map_page(device, page,
576 page_base, len,
577 DMA_TO_DEVICE);
578 if (ib_dma_mapping_error(device, sge[sge_no].addr))
579 goto out_mapping_err;
580 sge[sge_no].length = len;
581 sge[sge_no].lkey = lkey;
582 req->rl_mapped_sges++;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400583 }
Chuck Lever655fec62016-09-15 10:57:24 -0400584
585out:
586 req->rl_send_wr.num_sge = sge_no + 1;
587 return true;
588
589out_mapping_overflow:
590 pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
591 return false;
592
593out_mapping_err:
594 pr_err("rpcrdma: Send mapping error\n");
595 return false;
596}
597
598bool
599rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
600 u32 hdrlen, struct xdr_buf *xdr,
601 enum rpcrdma_chunktype rtype)
602{
603 req->rl_send_wr.num_sge = 0;
604 req->rl_mapped_sges = 0;
605
606 if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
607 goto out_map;
608
609 if (rtype != rpcrdma_areadch)
610 if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
611 goto out_map;
612
613 return true;
614
615out_map:
616 pr_err("rpcrdma: failed to DMA map a Send buffer\n");
617 return false;
618}
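
/* The resulting Send SGE layout for an inline request with a page list
 * and a tail looks roughly like this (illustrative sketch):
 *
 *   sge[0]            RPC-over-RDMA header  (rl_rdmabuf)
 *   sge[1]            xdr_buf head iovec    (rl_sendbuf)
 *   sge[2] .. sge[N]  one SGE per page-list page
 *   sge[N + 1]        xdr_buf tail iovec
 *
 * Only the page-list and tail SGEs are counted in rl_mapped_sges, and
 * only those are unmapped later by rpcrdma_unmap_sges().
 */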

void
rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge;
	int count;

	sge = &req->rl_send_sge[2];
	for (count = req->rl_mapped_sges; count--; sge++)
		ib_dma_unmap_page(device, sge->addr, sge->length,
				  DMA_TO_DEVICE);
	req->rl_mapped_sges = 0;
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;
	bool ddp_allowed;
	ssize_t hdrlen;
	size_t rpclen;
	__be32 *iptr;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		rtype = rpcrdma_noch;
		rpclen = rqst->rq_snd_buf.len;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
		rpclen = rqst->rq_snd_buf.head[0].iov_len +
			 rqst->rq_snd_buf.tail[0].iov_len;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = htonl(RDMA_NOMSG);
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	iptr = headerp->rm_body.rm_chunks;
	iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
	if (IS_ERR(iptr))
		goto out_unmap;
	iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
	if (IS_ERR(iptr))
		goto out_unmap;
	iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
	if (IS_ERR(iptr))
		goto out_unmap;
	hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;

	dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
		rqst->rq_task->tk_pid, __func__,
		transfertypes[rtype], transfertypes[wtype],
		hdrlen, rpclen);

	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen,
				       &rqst->rq_snd_buf, rtype)) {
		iptr = ERR_PTR(-EIO);
		goto out_unmap;
	}
	return 0;

out_unmap:
	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
	return PTR_ERR(iptr);
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC:       %s: chunk %d@0x%016llx:0x%08x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
{
	__be32 *p = (__be32 *)headerp;

	if (headerp->rm_type != rdma_msg)
		return false;
	if (headerp->rm_body.rm_chunks[0] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[1] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[2] != xdr_zero)
		return false;

	/* sanity */
	if (p[7] != headerp->rm_xid)
		return false;
	/* call direction */
	if (p[8] != cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	__be32 *iptr;
	int rdmalen, status, rmerr;
	unsigned long cwnd;

	dprintk("RPC:       %s: incoming rep %p\n", __func__, rep);

	if (rep->rr_len == RPCRDMA_BAD_LEN)
		goto out_badstatus;
	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		goto out_shortreply;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (rpcrdma_is_bcall(headerp))
		goto out_bcall;
#endif

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock_bh(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (!rqst)
		goto out_nomatch;

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply)
		goto out_duplicate;

	/* Sanity checking has passed. We are now committed
	 * to complete this transaction.
	 */
	list_del_init(&rqst->rq_list);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: reply %p completes request %p (xid 0x%08x)\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	if (headerp->rm_vers != rpcrdma_version)
		goto out_badversion;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     list_empty(&req->rl_registered)))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}

		r_xprt->rx_stats.fixup_copy_count +=
			rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len,
					     rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    list_empty(&req->rl_registered))
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

	case rdma_error:
		goto out_rdmaerr;

badheader:
	default:
		dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
			rqst->rq_task->tk_pid, __func__,
			be32_to_cpu(headerp->rm_type));
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

out:
	/* Invalidate and flush the data payloads before waking the
	 * waiting application. This guarantees the memory region is
	 * properly fenced from the server before the application
	 * accesses the data. It also ensures proper send flow
	 * control: waking the next RPC waits until this RPC has
	 * relinquished all its Send Queue entries.
	 */
	if (!list_empty(&req->rl_registered))
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);

	spin_lock_bh(&xprt->transport_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
out_bcall:
	rpcrdma_bc_receive_call(r_xprt, rep);
	return;
#endif

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badversion:
	dprintk("RPC:       %s: invalid version %d\n",
		__func__, be32_to_cpu(headerp->rm_vers));
	status = -EIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

out_rdmaerr:
	rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
	switch (rmerr) {
	case ERR_VERS:
		pr_err("%s: server reports header version error (%u-%u)\n",
		       __func__,
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
		break;
	case ERR_CHUNK:
		pr_err("%s: server reports header decoding error\n",
		       __func__);
		break;
	default:
		pr_err("%s: server reports unknown error %d\n",
		       __func__, rmerr);
	}
	status = -EREMOTEIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
out_shortreply:
	dprintk("RPC:       %s: short/invalid reply\n", __func__);
	goto repost;

out_nomatch:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: no match for incoming xid 0x%08x len %d\n",
		__func__, be32_to_cpu(headerp->rm_xid),
		rep->rr_len);
	goto repost;

out_duplicate:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: "
		"duplicate reply %p to RPC request %p: xid 0x%08x\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		rpcrdma_recv_buffer_put(rep);
}