\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -04002 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40/*
41 * rpc_rdma.c
42 *
43 * This file contains the guts of the RPC RDMA protocol, and
44 * does marshaling/unmarshaling, etc. It is also where interfacing
45 * to the Linux RPC framework lives.
\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -040046 */
47
48#include "xprt_rdma.h"
49
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040050#include <linux/highmem.h>
51
Jeff Laytonf895b252014-11-17 16:58:04 -050052#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040053# define RPCDBG_FACILITY RPCDBG_TRANS
54#endif
55
Chuck Levere2377942015-03-30 14:33:53 -040056enum rpcrdma_chunktype {
57 rpcrdma_noch = 0,
58 rpcrdma_readch,
59 rpcrdma_areadch,
60 rpcrdma_writech,
61 rpcrdma_replych
62};
63
Jeff Laytonf895b252014-11-17 16:58:04 -050064#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -040065static const char transfertypes[][12] = {
66 "pure inline", /* no chunks */
67 " read chunk", /* some argument via rdma read */
68 "*read chunk", /* entire request via rdma read */
69 "write chunk", /* some result via rdma write */
70 "reply chunk" /* entire reply via rdma write */
71};
72#endif
73
Chuck Lever5457ced2015-08-03 13:03:49 -040074/* The client can send a request inline as long as the RPCRDMA header
75 * plus the RPC call fit under the transport's inline limit. If the
76 * combined call message size exceeds that limit, the client must use
77 * the read chunk list for this operation.
78 */
79static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
80{
81 unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;
82
83 return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
84}
85
86/* The client can't know how large the actual reply will be. Thus it
87 * plans for the largest possible reply for that particular ULP
88 * operation. If the maximum combined reply message size exceeds that
89 * limit, the client must provide a write list or a reply chunk for
90 * this request.
91 */
92static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
93{
94 unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;
95
96 return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
97}
98
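/* Worked example for the two tests above (sizes are illustrative
 * assumptions, not values defined in this file): with a 1024-byte
 * inline write threshold and a 28-byte minimal RPC/RDMA header, a
 * call of 996 bytes fits (28 + 996 <= 1024) and is sent inline,
 * while a 997-byte call exceeds the limit and must be described by
 * a read chunk list instead.
 */
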
static int
rpcrdma_tail_pullup(struct xdr_buf *buf)
{
	size_t tlen = buf->tail[0].iov_len;
	size_t skip = tlen & 3;

	/* Do not include the tail if it is only an XDR pad */
	if (tlen < 4)
		return 0;

	/* xdr_write_pages() adds a pad at the beginning of the tail
	 * if the content in "buf->pages" is unaligned. Force the
	 * tail's actual content to land at the next XDR position
	 * after the head instead.
	 */
	if (skip) {
		unsigned char *src, *dst;
		unsigned int count;

		src = buf->tail[0].iov_base;
		dst = buf->head[0].iov_base;
		dst += buf->head[0].iov_len;

		src += skip;
		tlen -= skip;

		dprintk("RPC: %s: skip=%zu, memmove(%p, %p, %zu)\n",
			__func__, skip, dst, src, tlen);

		for (count = tlen; count; count--)
			*dst++ = *src++;
	}

	return tlen;
}

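/* Illustration of the pullup above (hypothetical lengths): if the
 * page data is 5 bytes, xdr_write_pages() puts a 3-byte XDR pad at
 * the front of the tail; with an 8-byte tail payload, tlen is 11 and
 * skip is 3. The copy loop drops the pad and copies the tail payload
 * to head[0].iov_base + head[0].iov_len, so the inline portion of
 * the message stays contiguous and XDR-aligned.
 */
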
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400135/*
136 * Chunk assembly from upper layer xdr_buf.
137 *
138 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
139 * elements. Segments are then coalesced when registered, if possible
140 * within the selected memreg mode.
Chuck Leverc93c6222014-05-28 10:35:14 -0400141 *
142 * Returns positive number of segments converted, or a negative errno.
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400143 */
144
145static int
Chuck Lever2a428b22007-10-26 13:30:43 -0400146rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400147 enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
148{
149 int len, n = 0, p;
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000150 int page_base;
151 struct page **ppages;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400152
153 if (pos == 0 && xdrbuf->head[0].iov_len) {
154 seg[n].mr_page = NULL;
155 seg[n].mr_offset = xdrbuf->head[0].iov_base;
156 seg[n].mr_len = xdrbuf->head[0].iov_len;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400157 ++n;
158 }
159
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000160 len = xdrbuf->page_len;
161 ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
162 page_base = xdrbuf->page_base & ~PAGE_MASK;
163 p = 0;
164 while (len && n < nsegs) {
Shirley Ma196c6992014-05-28 10:34:24 -0400165 if (!ppages[p]) {
166 /* alloc the pagelist for receiving buffer */
167 ppages[p] = alloc_page(GFP_ATOMIC);
168 if (!ppages[p])
Chuck Leverc93c6222014-05-28 10:35:14 -0400169 return -ENOMEM;
Shirley Ma196c6992014-05-28 10:34:24 -0400170 }
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000171 seg[n].mr_page = ppages[p];
172 seg[n].mr_offset = (void *)(unsigned long) page_base;
173 seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
Chuck Leverc93c6222014-05-28 10:35:14 -0400174 if (seg[n].mr_len > PAGE_SIZE)
175 return -EIO;
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000176 len -= seg[n].mr_len;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400177 ++n;
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000178 ++p;
179 page_base = 0; /* page offset only applies to first page */
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400180 }
181
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000182 /* Message overflows the seg array */
183 if (len && n == nsegs)
Chuck Leverc93c6222014-05-28 10:35:14 -0400184 return -EIO;
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000185
Chuck Lever677eb172015-08-03 13:04:17 -0400186 /* When encoding the read list, the tail is always sent inline */
187 if (type == rpcrdma_readch)
188 return n;
189
James Lentini50e10922007-12-10 11:24:48 -0500190 if (xdrbuf->tail[0].iov_len) {
Tom Talpey9191ca32008-10-09 15:01:11 -0400191 /* the rpcrdma protocol allows us to omit any trailing
192 * xdr pad bytes, saving the server an RDMA operation. */
193 if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
194 return n;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400195 if (n == nsegs)
Tom Tuckerbd7ea312011-02-09 19:45:28 +0000196 /* Tail remains, but we're out of segments */
Chuck Leverc93c6222014-05-28 10:35:14 -0400197 return -EIO;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400198 seg[n].mr_page = NULL;
199 seg[n].mr_offset = xdrbuf->tail[0].iov_base;
200 seg[n].mr_len = xdrbuf->tail[0].iov_len;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400201 ++n;
202 }
203
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400204 return n;
205}
206
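/* Example conversion (hypothetical xdr_buf, assuming 4KB pages): a
 * buffer with a 100-byte head, 6000 bytes of page data starting at
 * page offset 0, and no tail converts at pos == 0 into three
 * segments: { NULL, head base, 100 }, { page0, 0, 4096 } and
 * { page1, 0, 1904 }, and rpcrdma_convert_iovs() returns 3.
 */
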
/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 * Assume check against THRESHOLD has been done, and chunks are required.
 * Assume only encoding one list entry for read|write chunks. The NFSv3
 * protocol is simple enough to allow this as it only has a single "bulk
 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
222 * chunks do not provide data alignment, however they do not require
223 * "fixup" (moving the response to the upper layer buffer) either.
224 *
225 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
226 *
227 * Read chunklist (a linked list):
228 * N elements, position P (same P for all chunks of same arg!):
229 * 1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
230 *
231 * Write chunklist (a list of (one) counted array):
232 * N elements:
233 * 1 - N - HLOO - HLOO - ... - HLOO - 0
234 *
235 * Reply chunk (a counted array):
236 * N elements:
237 * 1 - N - HLOO - HLOO - ... - HLOO
Chuck Leverc93c6222014-05-28 10:35:14 -0400238 *
239 * Returns positive RPC/RDMA header size, or negative errno.
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400240 */

static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
	struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;
	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	map = r_xprt->rx_ia.ri_ops->ro_map;
	do {
		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = cpu_to_be32(pos);
			cur_rchunk->rc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = cpu_to_be32(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero;	/* finish the write chunk list */
			*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						      &req->rl_segments[pos]);
	return n;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;

	dprintk("RPC: %s: destp 0x%p len %d hdrlen %d\n",
		__func__, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
}

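/* Resulting layout, sketched with hypothetical sizes: a call with a
 * 128-byte RPC header in rq_svec[0] and 300 bytes of page data is
 * flattened by the pullup above into one contiguous 428-byte send
 * buffer, which then travels as a single inline RDMA Send.
 */
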
/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen;
	ssize_t hdrlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/*
	 * Chunks needed for results?
	 *
	 * o Read ops return data as write chunk(s), header as inline.
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else if (rpcrdma_results_inline(rqst))
		wtype = rpcrdma_noch;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(rqst)) {
		rtype = rpcrdma_noch;
	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = htonl(RDMA_NOMSG);
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
		dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = RPCRDMA_HDRLEN_MIN;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		rpcrdma_inline_pullup(rqst);

		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
		/* new length after pullup */
		rpclen = rqst->rq_svec[0].iov_len;
	} else if (rtype == rpcrdma_readch)
		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, wtype);
	}
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen,
		headerp, base, rdmab_lkey(req->rl_rdmabuf));

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_niovs = 1;
	if (rtype == rpcrdma_areadch)
		return 0;

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}

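/* Informal summary of the marshaling result above: in the common case
 * req->rl_send_iov[0] carries the RPC/RDMA header (hdrlen bytes) and
 * req->rl_send_iov[1] the RPC message itself (rpclen bytes); for the
 * RDMA_NOMSG (rpcrdma_areadch) case only iov[0] is posted, and the
 * server fetches the entire RPC message via the position-zero read
 * chunk.
 */
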
/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

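/* Example (hypothetical reply): a write chunk array with
 * wc_nchunks == 2 and segment lengths 4096 and 1024 advances *iptrp
 * past both HLOO entries (plus the list terminator when wrchunk is
 * set) and returns 5120, the byte count the server moved by RDMA
 * Write.
 */
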
/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

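/* Fixup example (hypothetical sizes): for a 500-byte inline reply
 * with a 200-byte head, the head iovec is simply re-pointed at the
 * first 200 bytes of the receive buffer (no copy), and the remaining
 * 300 bytes are copied out to the page list and tail as needed.
 */
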
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400697void
Chuck Lever254f91e2014-05-28 10:32:17 -0400698rpcrdma_connect_worker(struct work_struct *work)
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400699{
Chuck Lever254f91e2014-05-28 10:32:17 -0400700 struct rpcrdma_ep *ep =
701 container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
Chuck Leverafadc462015-01-21 11:03:11 -0500702 struct rpcrdma_xprt *r_xprt =
703 container_of(ep, struct rpcrdma_xprt, rx_ep);
704 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400705
706 spin_lock_bh(&xprt->transport_lock);
Tom Talpey575448b2008-10-09 15:00:40 -0400707 if (++xprt->connect_cookie == 0) /* maintain a reserved value */
708 ++xprt->connect_cookie;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400709 if (ep->rep_connected > 0) {
710 if (!xprt_test_and_set_connected(xprt))
711 xprt_wake_pending_tasks(xprt, 0);
712 } else {
713 if (xprt_test_and_clear_connected(xprt))
Tom Talpey926449b2008-10-09 15:01:21 -0400714 xprt_wake_pending_tasks(xprt, -ENOTCONN);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400715 }
716 spin_unlock_bh(&xprt->transport_lock);
717}
718
719/*
Chuck Lever254f91e2014-05-28 10:32:17 -0400720 * This function is called when an async event is posted to
721 * the connection which changes the connection state. All it
722 * does at this point is mark the connection up/down, the rpc
723 * timers do the rest.
724 */
725void
726rpcrdma_conn_func(struct rpcrdma_ep *ep)
727{
728 schedule_delayed_work(&ep->rep_connect_worker, 0);
729}
730
Chuck Leverfe97b472015-10-24 17:27:10 -0400731/* Process received RPC/RDMA messages.
732 *
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400733 * Errors must result in the RPC task either being awakened, or
734 * allowed to timeout, to discover the errors at that time.
735 */
736void
737rpcrdma_reply_handler(struct rpcrdma_rep *rep)
738{
739 struct rpcrdma_msg *headerp;
740 struct rpcrdma_req *req;
741 struct rpc_rqst *rqst;
Chuck Leverfed171b2015-05-26 11:51:37 -0400742 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
743 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
Al Viro2d8a9722007-10-29 04:37:58 +0000744 __be32 *iptr;
Chuck Lever9b1dcbc2015-02-12 10:14:51 -0500745 int rdmalen, status;
Chuck Levere7ce7102014-05-28 10:34:57 -0400746 unsigned long cwnd;
Chuck Lever9b1dcbc2015-02-12 10:14:51 -0500747 u32 credits;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400748
Chuck Leverb0e178a2015-10-24 17:26:54 -0400749 dprintk("RPC: %s: incoming rep %p\n", __func__, rep);
750
751 if (rep->rr_len == RPCRDMA_BAD_LEN)
752 goto out_badstatus;
753 if (rep->rr_len < RPCRDMA_HDRLEN_MIN)
754 goto out_shortreply;
755
Chuck Lever6b1184c2015-01-21 11:04:25 -0500756 headerp = rdmab_to_msg(rep->rr_rdmabuf);
Chuck Leverb0e178a2015-10-24 17:26:54 -0400757 if (headerp->rm_vers != rpcrdma_version)
758 goto out_badversion;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400759
Chuck Leverfe97b472015-10-24 17:27:10 -0400760 /* Match incoming rpcrdma_rep to an rpcrdma_req to
761 * get context for handling any incoming chunks.
762 */
763 spin_lock_bh(&xprt->transport_lock);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400764 rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
Chuck Leverb0e178a2015-10-24 17:26:54 -0400765 if (!rqst)
766 goto out_nomatch;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400767
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400768 req = rpcr_to_rdmar(rqst);
Chuck Leverb0e178a2015-10-24 17:26:54 -0400769 if (req->rl_reply)
770 goto out_duplicate;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400771
772 dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
773 " RPC request 0x%p xid 0x%08x\n",
Chuck Lever052151a2015-01-21 11:02:21 -0500774 __func__, rep, req, rqst,
775 be32_to_cpu(headerp->rm_xid));
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400776
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400777 /* from here on, the reply is no longer an orphan */
778 req->rl_reply = rep;
Chuck Lever18906972014-05-28 10:34:41 -0400779 xprt->reestablish_timeout = 0;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400780
781 /* check for expected message types */
782 /* The order of some of these tests is important. */
783 switch (headerp->rm_type) {
Chuck Lever284f4902015-01-21 11:02:13 -0500784 case rdma_msg:
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400785 /* never expect read chunks */
786 /* never expect reply chunks (two ways to check) */
787 /* never expect write chunks without having offered RDMA */
788 if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
789 (headerp->rm_body.rm_chunks[1] == xdr_zero &&
790 headerp->rm_body.rm_chunks[2] != xdr_zero) ||
791 (headerp->rm_body.rm_chunks[1] != xdr_zero &&
792 req->rl_nchunks == 0))
793 goto badheader;
794 if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
795 /* count any expected write chunks in read reply */
796 /* start at write chunk array count */
797 iptr = &headerp->rm_body.rm_chunks[2];
798 rdmalen = rpcrdma_count_chunks(rep,
799 req->rl_nchunks, 1, &iptr);
800 /* check for validity, and no reply chunk after */
801 if (rdmalen < 0 || *iptr++ != xdr_zero)
802 goto badheader;
803 rep->rr_len -=
804 ((unsigned char *)iptr - (unsigned char *)headerp);
805 status = rep->rr_len + rdmalen;
806 r_xprt->rx_stats.total_rdma_reply += rdmalen;
Tom Talpey9191ca32008-10-09 15:01:11 -0400807 /* special case - last chunk may omit padding */
808 if (rdmalen &= 3) {
809 rdmalen = 4 - rdmalen;
810 status += rdmalen;
811 }
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400812 } else {
813 /* else ordinary inline */
Tom Talpey9191ca32008-10-09 15:01:11 -0400814 rdmalen = 0;
Chuck Leverf2846482015-01-21 11:02:29 -0500815 iptr = (__be32 *)((unsigned char *)headerp +
816 RPCRDMA_HDRLEN_MIN);
817 rep->rr_len -= RPCRDMA_HDRLEN_MIN;
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400818 status = rep->rr_len;
819 }
820 /* Fix up the rpc results for upper layer */
Tom Talpey9191ca32008-10-09 15:01:11 -0400821 rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400822 break;
823
Chuck Lever284f4902015-01-21 11:02:13 -0500824 case rdma_nomsg:
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400825 /* never expect read or write chunks, always reply chunks */
826 if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
827 headerp->rm_body.rm_chunks[1] != xdr_zero ||
828 headerp->rm_body.rm_chunks[2] != xdr_one ||
829 req->rl_nchunks == 0)
830 goto badheader;
Chuck Leverf2846482015-01-21 11:02:29 -0500831 iptr = (__be32 *)((unsigned char *)headerp +
832 RPCRDMA_HDRLEN_MIN);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400833 rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
834 if (rdmalen < 0)
835 goto badheader;
836 r_xprt->rx_stats.total_rdma_reply += rdmalen;
837 /* Reply chunk buffer already is the reply vector - no fixup. */
838 status = rdmalen;
839 break;
840
841badheader:
842 default:
843 dprintk("%s: invalid rpcrdma reply header (type %d):"
844 " chunks[012] == %d %d %d"
845 " expected chunks <= %d\n",
Chuck Lever284f4902015-01-21 11:02:13 -0500846 __func__, be32_to_cpu(headerp->rm_type),
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400847 headerp->rm_body.rm_chunks[0],
848 headerp->rm_body.rm_chunks[1],
849 headerp->rm_body.rm_chunks[2],
850 req->rl_nchunks);
851 status = -EIO;
852 r_xprt->rx_stats.bad_reply_count++;
853 break;
854 }
855
Chuck Levereba8ff62015-01-21 11:03:02 -0500856 credits = be32_to_cpu(headerp->rm_credit);
857 if (credits == 0)
858 credits = 1; /* don't deadlock */
859 else if (credits > r_xprt->rx_buf.rb_max_requests)
860 credits = r_xprt->rx_buf.rb_max_requests;
861
Chuck Levere7ce7102014-05-28 10:34:57 -0400862 cwnd = xprt->cwnd;
Chuck Levereba8ff62015-01-21 11:03:02 -0500863 xprt->cwnd = credits << RPC_CWNDSHIFT;
Chuck Levere7ce7102014-05-28 10:34:57 -0400864 if (xprt->cwnd > cwnd)
865 xprt_release_rqst_cong(rqst->rq_task);
866
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400867 xprt_complete_rqst(rqst->rq_task, status);
Chuck Leverfe97b472015-10-24 17:27:10 -0400868 spin_unlock_bh(&xprt->transport_lock);
Chuck Leverb0e178a2015-10-24 17:26:54 -0400869 dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
870 __func__, xprt, rqst, status);
871 return;
872
873out_badstatus:
874 rpcrdma_recv_buffer_put(rep);
875 if (r_xprt->rx_ep.rep_connected == 1) {
876 r_xprt->rx_ep.rep_connected = -EIO;
877 rpcrdma_conn_func(&r_xprt->rx_ep);
878 }
879 return;
880
881out_shortreply:
882 dprintk("RPC: %s: short/invalid reply\n", __func__);
883 goto repost;
884
885out_badversion:
886 dprintk("RPC: %s: invalid version %d\n",
887 __func__, be32_to_cpu(headerp->rm_vers));
888 goto repost;
889
890out_nomatch:
Chuck Leverfe97b472015-10-24 17:27:10 -0400891 spin_unlock_bh(&xprt->transport_lock);
Chuck Leverb0e178a2015-10-24 17:26:54 -0400892 dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
893 __func__, be32_to_cpu(headerp->rm_xid),
894 rep->rr_len);
895 goto repost;
896
897out_duplicate:
Chuck Leverfe97b472015-10-24 17:27:10 -0400898 spin_unlock_bh(&xprt->transport_lock);
Chuck Leverb0e178a2015-10-24 17:26:54 -0400899 dprintk("RPC: %s: "
900 "duplicate reply %p to RPC request %p: xid 0x%08x\n",
901 __func__, rep, req, be32_to_cpu(headerp->rm_xid));
902
903repost:
904 r_xprt->rx_stats.bad_reply_count++;
905 if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
906 rpcrdma_recv_buffer_put(rep);
\"Talpey, Thomas\e9601822007-09-10 13:50:42 -0400907}