\"Talpey, Thomas\ | f58851e | 2007-09-10 13:50:12 -0400 | [diff] [blame] | 1 | /* |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 2 | * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. |
| 3 | * |
| 4 | * This software is available to you under a choice of one of two |
| 5 | * licenses. You may choose to be licensed under the terms of the GNU |
| 6 | * General Public License (GPL) Version 2, available from the file |
| 7 | * COPYING in the main directory of this source tree, or the BSD-type |
| 8 | * license below: |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or without |
| 11 | * modification, are permitted provided that the following conditions |
| 12 | * are met: |
| 13 | * |
| 14 | * Redistributions of source code must retain the above copyright |
| 15 | * notice, this list of conditions and the following disclaimer. |
| 16 | * |
| 17 | * Redistributions in binary form must reproduce the above |
| 18 | * copyright notice, this list of conditions and the following |
| 19 | * disclaimer in the documentation and/or other materials provided |
| 20 | * with the distribution. |
| 21 | * |
| 22 | * Neither the name of the Network Appliance, Inc. nor the names of |
| 23 | * its contributors may be used to endorse or promote products |
| 24 | * derived from this software without specific prior written |
| 25 | * permission. |
| 26 | * |
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 31 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 32 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 33 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 34 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 35 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 36 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 37 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 38 | */ |
| 39 | |
| 40 | /* |
| 41 | * rpc_rdma.c |
| 42 | * |
| 43 | * This file contains the guts of the RPC RDMA protocol, and |
| 44 | * does marshaling/unmarshaling, etc. It is also where interfacing |
| 45 | * to the Linux RPC framework lives. |
\"Talpey, Thomas\ | f58851e | 2007-09-10 13:50:12 -0400 | [diff] [blame] | 46 | */ |
| 47 | |
| 48 | #include "xprt_rdma.h" |
| 49 | |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 50 | #include <linux/highmem.h> |
| 51 | |
Jeff Layton | f895b25 | 2014-11-17 16:58:04 -0500 | [diff] [blame] | 52 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 53 | # define RPCDBG_FACILITY RPCDBG_TRANS |
| 54 | #endif |
| 55 | |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 56 | static const char transfertypes[][12] = { |
Chuck Lever | 94f58c5 | 2016-05-02 14:41:30 -0400 | [diff] [blame] | 57 | "inline", /* no chunks */ |
| 58 | "read list", /* some argument via rdma read */ |
| 59 | "*read list", /* entire request via rdma read */ |
| 60 | "write list", /* some result via rdma write */ |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 61 | "reply chunk" /* entire reply via rdma write */ |
| 62 | }; |

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += maxsegs * sizeof(struct rpcrdma_read_chunk);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += sizeof(__be32);	/* segment count */
	size += maxsegs * sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

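/* Worked example (editor's sketch, assuming the usual on-the-wire
 * sizes from rpc_rdma.h: struct rpcrdma_segment is 16 bytes, struct
 * rpcrdma_read_chunk is 24 bytes, and RPCRDMA_HDRLEN_MIN is 7 XDR
 * words, or 28 bytes): with maxsegs = 8, the Call header budget is
 *
 *	28 + (8 + 2) * 24 + (4 + 16 + 4) = 292 bytes.
 *
 * This worst case is subtracted from the inline threshold below to
 * find the largest RPC message that can go without chunks.
 */
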
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
		return false;

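	/* Each page in the payload's page list consumes its own Send
	 * SGE, so count how many SGEs this xdr_buf would need and
	 * verify the device can post them in a single Send WR.
	 */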
	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = 0;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split "vec" on page boundaries into segments. FMR registers pages,
 * not a byte range. Other modes coalesce these segments into a single
 * MR when they can.
 */
static int
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
{
	size_t page_offset;
	u32 remaining;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining && n < RPCRDMA_MAX_SEGS) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg[n].mr_len;
		base += seg[n].mr_len;
		++n;
		page_offset = 0;
	}
	return n;
}
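
/* Example (editor's sketch, assuming 4KB pages): a kvec with iov_base
 * at page offset 3000 and iov_len 6000 becomes three segments of
 * 1096, 4096, and 808 bytes, so that no segment crosses a page
 * boundary.
 */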

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	int len, n, p, page_base;
	struct page **ppages;

	n = 0;
	if (pos == 0) {
		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	p = 0;
	while (len && n < RPCRDMA_MAX_SEGS) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -EAGAIN;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			goto out_overflow;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == RPCRDMA_MAX_SEGS)
		goto out_overflow;

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		return n;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	return n;

out_overflow:
	pr_err("rpcrdma: segment array overflow\n");
	return -EIO;
}

static inline __be32 *
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
	*iptr++ = cpu_to_be32(mw->mw_handle);
	*iptr++ = cpu_to_be32(mw->mw_length);
	return xdr_encode_hyper(iptr, mw->mw_offset);
}

/* XDR-encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Read list, or an error pointer.
 */
static __be32 *
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_req *req, struct rpc_rqst *rqst,
			 __be32 *iptr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int pos;
	int n, nsegs;

	if (rtype == rpcrdma_noch) {
		*iptr++ = xdr_zero;	/* item not present */
		return iptr;
	}

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 false, &mw);
		if (n < 0)
			return ERR_PTR(n);
		rpcrdma_push_mw(mw, &req->rl_registered);

		*iptr++ = xdr_one;	/* item present */

		/* All read segments in this chunk
		 * have the same "position".
		 */
		*iptr++ = cpu_to_be32(pos);
		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__, pos,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.read_chunk_count++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Finish Read list */
	*iptr++ = xdr_zero;	/* Next item not present */
	return iptr;
}
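
/* For instance, a Read chunk comprising two segments at position P
 * goes on the wire as:
 *
 *	1, P, H1 L1 OO1, 1, P, H2 L2 OO2, 0
 *
 * where the trailing zero terminates the Read list.
 */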

/* XDR-encode the Write list. Supports encoding a list containing
 * one array of plain segments that belong to a single write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Write list, or an error pointer.
 */
static __be32 *
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, __be32 *iptr,
			  enum rpcrdma_chunktype wtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech) {
		*iptr++ = xdr_zero;	/* no Write list present */
		return iptr;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	*iptr++ = xdr_one;	/* Write list present */
	segcount = iptr++;	/* save location of segment count */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return ERR_PTR(n);
		rpcrdma_push_mw(mw, &req->rl_registered);

		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	/* Finish Write list */
	*iptr++ = xdr_zero;	/* Next item not present */
	return iptr;
}
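
/* For instance, a Write list bearing one chunk of two segments goes
 * on the wire as:
 *
 *	1, 2, H1 L1 OO1, H2 L2 OO2, 0
 *
 * where 2 is the segment count and the trailing zero indicates that
 * no further Write chunks follow.
 */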

/* XDR-encode the Reply chunk. Supports encoding an array of plain
 * segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Reply chunk, or an error pointer.
 */
static __be32 *
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
			   struct rpcrdma_req *req, struct rpc_rqst *rqst,
			   __be32 *iptr, enum rpcrdma_chunktype wtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych) {
		*iptr++ = xdr_zero;	/* no Reply chunk present */
		return iptr;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	*iptr++ = xdr_one;	/* Reply chunk present */
	segcount = iptr++;	/* save location of segment count */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return ERR_PTR(n);
		rpcrdma_push_mw(mw, &req->rl_registered);

		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return iptr;
}
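
/* Note that, unlike the Write list above, the Reply chunk is not
 * followed by a terminating discriminator: it is a single optional
 * counted array, and the final item in the header's chunk-list
 * section.
 */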
| 479 | |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 480 | /* Prepare the RPC-over-RDMA header SGE. |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 481 | */ |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 482 | static bool |
| 483 | rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req, |
| 484 | u32 len) |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 485 | { |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 486 | struct rpcrdma_regbuf *rb = req->rl_rdmabuf; |
| 487 | struct ib_sge *sge = &req->rl_send_sge[0]; |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 488 | |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 489 | if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) { |
| 490 | if (!__rpcrdma_dma_map_regbuf(ia, rb)) |
| 491 | return false; |
| 492 | sge->addr = rdmab_addr(rb); |
| 493 | sge->lkey = rdmab_lkey(rb); |
| 494 | } |
| 495 | sge->length = len; |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 496 | |
Chuck Lever | 91a10c5 | 2017-04-11 13:23:02 -0400 | [diff] [blame] | 497 | ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 498 | sge->length, DMA_TO_DEVICE); |
| 499 | req->rl_send_wr.num_sge++; |
| 500 | return true; |
| 501 | } |
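
/* Note: the header regbuf is DMA-mapped only once; its address and
 * lkey stay cached in rl_send_sge[0], so subsequent calls merely
 * update the SGE length and sync the buffer before posting.
 */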
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 502 | |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 503 | /* Prepare the Send SGEs. The head and tail iovec, and each entry |
| 504 | * in the page list, gets its own SGE. |
| 505 | */ |
| 506 | static bool |
| 507 | rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req, |
| 508 | struct xdr_buf *xdr, enum rpcrdma_chunktype rtype) |
| 509 | { |
| 510 | unsigned int sge_no, page_base, len, remaining; |
| 511 | struct rpcrdma_regbuf *rb = req->rl_sendbuf; |
| 512 | struct ib_device *device = ia->ri_device; |
| 513 | struct ib_sge *sge = req->rl_send_sge; |
| 514 | u32 lkey = ia->ri_pd->local_dma_lkey; |
| 515 | struct page *page, **ppages; |
Tom Talpey | b38ab40 | 2009-03-11 14:37:55 -0400 | [diff] [blame] | 516 | |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 517 | /* The head iovec is straightforward, as it is already |
| 518 | * DMA-mapped. Sync the content that has changed. |
| 519 | */ |
| 520 | if (!rpcrdma_dma_map_regbuf(ia, rb)) |
| 521 | return false; |
| 522 | sge_no = 1; |
| 523 | sge[sge_no].addr = rdmab_addr(rb); |
| 524 | sge[sge_no].length = xdr->head[0].iov_len; |
| 525 | sge[sge_no].lkey = rdmab_lkey(rb); |
Chuck Lever | 91a10c5 | 2017-04-11 13:23:02 -0400 | [diff] [blame] | 526 | ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr, |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 527 | sge[sge_no].length, DMA_TO_DEVICE); |
| 528 | |
| 529 | /* If there is a Read chunk, the page list is being handled |
| 530 | * via explicit RDMA, and thus is skipped here. However, the |
| 531 | * tail iovec may include an XDR pad for the page list, as |
| 532 | * well as additional content, and may not reside in the |
| 533 | * same page as the head iovec. |
| 534 | */ |
| 535 | if (rtype == rpcrdma_readch) { |
| 536 | len = xdr->tail[0].iov_len; |
| 537 | |
| 538 | /* Do not include the tail if it is only an XDR pad */ |
| 539 | if (len < 4) |
| 540 | goto out; |
| 541 | |
| 542 | page = virt_to_page(xdr->tail[0].iov_base); |
Chuck Lever | d933cc3 | 2017-06-08 11:53:16 -0400 | [diff] [blame] | 543 | page_base = offset_in_page(xdr->tail[0].iov_base); |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 544 | |
| 545 | /* If the content in the page list is an odd length, |
| 546 | * xdr_write_pages() has added a pad at the beginning |
| 547 | * of the tail iovec. Force the tail's non-pad content |
| 548 | * to land at the next XDR position in the Send message. |
| 549 | */ |
| 550 | page_base += len & 3; |
| 551 | len -= len & 3; |
| 552 | goto map_tail; |
| 553 | } |
| 554 | |
| 555 | /* If there is a page list present, temporarily DMA map |
| 556 | * and prepare an SGE for each page to be sent. |
| 557 | */ |
| 558 | if (xdr->page_len) { |
| 559 | ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); |
Chuck Lever | d933cc3 | 2017-06-08 11:53:16 -0400 | [diff] [blame] | 560 | page_base = offset_in_page(xdr->page_base); |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 561 | remaining = xdr->page_len; |
| 562 | while (remaining) { |
| 563 | sge_no++; |
| 564 | if (sge_no > RPCRDMA_MAX_SEND_SGES - 2) |
| 565 | goto out_mapping_overflow; |
| 566 | |
| 567 | len = min_t(u32, PAGE_SIZE - page_base, remaining); |
| 568 | sge[sge_no].addr = ib_dma_map_page(device, *ppages, |
| 569 | page_base, len, |
| 570 | DMA_TO_DEVICE); |
| 571 | if (ib_dma_mapping_error(device, sge[sge_no].addr)) |
| 572 | goto out_mapping_err; |
| 573 | sge[sge_no].length = len; |
| 574 | sge[sge_no].lkey = lkey; |
| 575 | |
| 576 | req->rl_mapped_sges++; |
| 577 | ppages++; |
| 578 | remaining -= len; |
| 579 | page_base = 0; |
Tom Talpey | b38ab40 | 2009-03-11 14:37:55 -0400 | [diff] [blame] | 580 | } |
Tom Talpey | b38ab40 | 2009-03-11 14:37:55 -0400 | [diff] [blame] | 581 | } |
Tom Tucker | bd7ea31 | 2011-02-09 19:45:28 +0000 | [diff] [blame] | 582 | |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 583 | /* The tail iovec is not always constructed in the same |
| 584 | * page where the head iovec resides (see, for example, |
| 585 | * gss_wrap_req_priv). To neatly accommodate that case, |
| 586 | * DMA map it separately. |
| 587 | */ |
| 588 | if (xdr->tail[0].iov_len) { |
| 589 | page = virt_to_page(xdr->tail[0].iov_base); |
Chuck Lever | d933cc3 | 2017-06-08 11:53:16 -0400 | [diff] [blame] | 590 | page_base = offset_in_page(xdr->tail[0].iov_base); |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 591 | len = xdr->tail[0].iov_len; |
| 592 | |
| 593 | map_tail: |
| 594 | sge_no++; |
| 595 | sge[sge_no].addr = ib_dma_map_page(device, page, |
| 596 | page_base, len, |
| 597 | DMA_TO_DEVICE); |
| 598 | if (ib_dma_mapping_error(device, sge[sge_no].addr)) |
| 599 | goto out_mapping_err; |
| 600 | sge[sge_no].length = len; |
| 601 | sge[sge_no].lkey = lkey; |
| 602 | req->rl_mapped_sges++; |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 603 | } |
Chuck Lever | 655fec6 | 2016-09-15 10:57:24 -0400 | [diff] [blame] | 604 | |
| 605 | out: |
| 606 | req->rl_send_wr.num_sge = sge_no + 1; |
| 607 | return true; |
| 608 | |
| 609 | out_mapping_overflow: |
| 610 | pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no); |
| 611 | return false; |
| 612 | |
| 613 | out_mapping_err: |
| 614 | pr_err("rpcrdma: Send mapping error\n"); |
| 615 | return false; |
| 616 | } |
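
/* Send SGE layout, as constructed above: rl_send_sge[0] carries the
 * RPC-over-RDMA header, rl_send_sge[1] the head iovec, and
 * rl_send_sge[2] and up the page list and tail. Only entries from
 * index 2 on are mapped with ib_dma_map_page() per Send, which is
 * why rpcrdma_unmap_sges() below starts at rl_send_sge[2].
 */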

bool
rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			  u32 hdrlen, struct xdr_buf *xdr,
			  enum rpcrdma_chunktype rtype)
{
	req->rl_send_wr.num_sge = 0;
	req->rl_mapped_sges = 0;

	if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
		goto out_map;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
			goto out_map;

	return true;

out_map:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

void
rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge;
	int count;

	sge = &req->rl_send_sge[2];
	for (count = req->rl_mapped_sges; count--; sge++)
		ib_dma_unmap_page(device, sge->addr, sge->length,
				  DMA_TO_DEVICE);
	req->rl_mapped_sges = 0;
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;
	bool ddp_allowed;
	ssize_t hdrlen;
	size_t rpclen;
	__be32 *iptr;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		rtype = rpcrdma_noch;
		rpclen = rqst->rq_snd_buf.len;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
		rpclen = rqst->rq_snd_buf.head[0].iov_len +
			 rqst->rq_snd_buf.tail[0].iov_len;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = htonl(RDMA_NOMSG);
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	req->rl_xid = rqst->rq_xid;
	rpcrdma_insert_req(&r_xprt->rx_buf, req);

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	iptr = headerp->rm_body.rm_chunks;
	iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
	if (IS_ERR(iptr))
		goto out_err;
	iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
	if (IS_ERR(iptr))
		goto out_err;
	iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
	if (IS_ERR(iptr))
		goto out_err;
	hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;

	dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
		rqst->rq_task->tk_pid, __func__,
		transfertypes[rtype], transfertypes[wtype],
		hdrlen, rpclen);

	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen,
				       &rqst->rq_snd_buf, rtype)) {
		iptr = ERR_PTR(-EIO);
		goto out_err;
	}
	return 0;

out_err:
	if (PTR_ERR(iptr) != -ENOBUFS) {
		pr_err("rpcrdma: rpcrdma_marshal_req failed, status %ld\n",
		       PTR_ERR(iptr));
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return PTR_ERR(iptr);
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See the encoding key at
 * rpcrdma_encode_write_list()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%016llx:0x%08x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}
| 933 | |
Chuck Lever | 4b196dc6 | 2017-06-08 11:51:56 -0400 | [diff] [blame] | 934 | /* Caller must guarantee @rep remains stable during this call. |
| 935 | */ |
| 936 | static void |
| 937 | rpcrdma_mark_remote_invalidation(struct list_head *mws, |
| 938 | struct rpcrdma_rep *rep) |
| 939 | { |
| 940 | struct rpcrdma_mw *mw; |
| 941 | |
| 942 | if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)) |
| 943 | return; |
| 944 | |
| 945 | list_for_each_entry(mw, mws, mw_list) |
| 946 | if (mw->mw_handle == rep->rr_inv_rkey) { |
| 947 | mw->mw_flags = RPCRDMA_MW_F_RI; |
| 948 | break; /* only one invalidated MR per RPC */ |
| 949 | } |
| 950 | } |
| 951 | |
Chuck Lever | 63cae47 | 2015-10-24 17:28:08 -0400 | [diff] [blame] | 952 | /* By convention, backchannel calls arrive via rdma_msg type |
| 953 | * messages, and never populate the chunk lists. This makes |
| 954 | * the RPC/RDMA header small and fixed in size, so it is |
| 955 | * straightforward to check the RPC header's direction field. |
| 956 | */ |
| 957 | static bool |
Chuck Lever | 41c8f70 | 2017-08-03 14:30:11 -0400 | [diff] [blame] | 958 | rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, |
| 959 | __be32 xid, __be32 proc) |
| 960 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) |
Chuck Lever | 63cae47 | 2015-10-24 17:28:08 -0400 | [diff] [blame] | 961 | { |
Chuck Lever | 41c8f70 | 2017-08-03 14:30:11 -0400 | [diff] [blame] | 962 | struct xdr_stream *xdr = &rep->rr_stream; |
| 963 | __be32 *p; |
Chuck Lever | 63cae47 | 2015-10-24 17:28:08 -0400 | [diff] [blame] | 964 | |
Chuck Lever | 41c8f70 | 2017-08-03 14:30:11 -0400 | [diff] [blame] | 965 | if (proc != rdma_msg) |
Chuck Lever | 63cae47 | 2015-10-24 17:28:08 -0400 | [diff] [blame] | 966 | return false; |
| 967 | |
Chuck Lever | 41c8f70 | 2017-08-03 14:30:11 -0400 | [diff] [blame] | 968 | /* Peek at stream contents without advancing. */ |
| 969 | p = xdr_inline_decode(xdr, 0); |
| 970 | |
| 971 | /* Chunk lists */ |
| 972 | if (*p++ != xdr_zero) |
Chuck Lever | 63cae47 | 2015-10-24 17:28:08 -0400 | [diff] [blame] | 973 | return false; |
Chuck Lever | 41c8f70 | 2017-08-03 14:30:11 -0400 | [diff] [blame] | 974 | if (*p++ != xdr_zero) |
| 975 | return false; |
| 976 | if (*p++ != xdr_zero) |
Chuck Lever | 63cae47 | 2015-10-24 17:28:08 -0400 | [diff] [blame] | 977 | return false; |
| 978 | |
Chuck Lever | 41c8f70 | 2017-08-03 14:30:11 -0400 | [diff] [blame] | 979 | /* RPC header */ |
| 980 | if (*p++ != xid) |
| 981 | return false; |
| 982 | if (*p != cpu_to_be32(RPC_CALL)) |
| 983 | return false; |
| 984 | |
| 985 | /* Now that we are sure this is a backchannel call, |
| 986 | * advance to the RPC header. |
| 987 | */ |
| 988 | p = xdr_inline_decode(xdr, 3 * sizeof(*p)); |
| 989 | if (unlikely(!p)) |
| 990 | goto out_short; |
| 991 | |
| 992 | rpcrdma_bc_receive_call(r_xprt, rep); |
Chuck Lever | 63cae47 | 2015-10-24 17:28:08 -0400 | [diff] [blame] | 993 | return true; |
Chuck Lever | 41c8f70 | 2017-08-03 14:30:11 -0400 | [diff] [blame] | 994 | |
| 995 | out_short: |
| 996 | pr_warn("RPC/RDMA short backward direction call\n"); |
| 997 | if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep)) |
| 998 | xprt_disconnect_done(&r_xprt->rx_xprt); |
| 999 | return true; |
| 1000 | } |
| 1001 | #else /* CONFIG_SUNRPC_BACKCHANNEL */ |
| 1002 | { |
| 1003 | return false; |
Chuck Lever | 63cae47 | 2015-10-24 17:28:08 -0400 | [diff] [blame] | 1004 | } |
| 1005 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
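/* For reference, a hypothetical C view of the five words peeked at by
 * rpcrdma_is_bcall() above (struct and field names are illustrative;
 * the layout simply mirrors the checks in that function):
 */
struct rpcrdma_bcall_peek_sketch {
	__be32 read_list;	/* xdr_zero: no Read list */
	__be32 write_list;	/* xdr_zero: no Write list */
	__be32 reply_chunk;	/* xdr_zero: no Reply chunk */
	__be32 rpc_xid;		/* must echo the transport header's XID */
	__be32 rpc_direction;	/* cpu_to_be32(RPC_CALL) for a backchannel call */
};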
| 1006 | |
Chuck Lever | 07ff2dd | 2017-08-03 14:30:19 -0400 | [diff] [blame^] | 1007 | static int |
| 1008 | rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, |
| 1009 | struct rpc_rqst *rqst) |
| 1010 | { |
| 1011 | struct xdr_stream *xdr = &rep->rr_stream; |
| 1012 | int rdmalen, status; |
| 1013 | __be32 *p; |
| 1014 | |
| 1015 | p = xdr_inline_decode(xdr, 2 * sizeof(*p)); |
| 1016 | if (unlikely(!p)) |
| 1017 | return -EIO; |
| 1018 | |
| 1019 | /* never expect a Read list */ |
| 1020 | if (unlikely(*p++ != xdr_zero)) |
| 1021 | return -EIO; |
| 1022 | |
| 1023 | /* Write list */ |
| 1024 | if (*p != xdr_zero) { |
| 1025 | char *base = rep->rr_hdrbuf.head[0].iov_base; |
| 1026 | |
| 1027 | p++; |
| 1028 | rdmalen = rpcrdma_count_chunks(rep, 1, &p); |
| 1029 | if (rdmalen < 0 || *p++ != xdr_zero) |
| 1030 | return -EIO; |
| 1031 | |
| 1032 | rep->rr_len -= (char *)p - base; |
| 1033 | status = rep->rr_len + rdmalen; |
| 1034 | r_xprt->rx_stats.total_rdma_reply += rdmalen; |
| 1035 | |
| 1036 | /* special case - last segment may omit padding */ |
| 1037 | rdmalen &= 3; |
| 1038 | if (rdmalen) { |
| 1039 | rdmalen = 4 - rdmalen; |
| 1040 | status += rdmalen; |
| 1041 | } |
| 1042 | } else { |
| 1043 | p = xdr_inline_decode(xdr, sizeof(*p)); |
| 1044 | if (unlikely(!p)) |
| 1045 | return -EIO; |
| 1046 | |
| 1047 | /* never expect a Reply chunk */ |
| 1048 | if (*p++ != xdr_zero) |
| 1049 | return -EIO; |
| 1050 | rdmalen = 0; |
| 1051 | rep->rr_len -= RPCRDMA_HDRLEN_MIN; |
| 1052 | status = rep->rr_len; |
| 1053 | } |
| 1054 | |
| 1055 | r_xprt->rx_stats.fixup_copy_count += |
| 1056 | rpcrdma_inline_fixup(rqst, (char *)p, rep->rr_len, |
| 1057 | rdmalen); |
| 1058 | |
| 1059 | return status; |
| 1060 | } |
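/* Worked example for the accounting above (values assumed): if parsing
 * the chunk-bearing header advanced p by H bytes and the Write list
 * carried 1021 bytes, then rr_len shrinks by H and status becomes
 * rr_len + 1021 + 3, the extra 3 restoring XDR pad bytes the responder
 * may omit on the last segment (1021 & 3 == 1, so 4 - 1 == 3).
 */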
| 1061 | |
| 1062 | static noinline int |
| 1063 | rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) |
| 1064 | { |
| 1065 | struct xdr_stream *xdr = &rep->rr_stream; |
| 1066 | int rdmalen; |
| 1067 | __be32 *p; |
| 1068 | |
| 1069 | p = xdr_inline_decode(xdr, 3 * sizeof(*p)); |
| 1070 | if (unlikely(!p)) |
| 1071 | return -EIO; |
| 1072 | |
| 1073 | /* never expect Read chunks */ |
| 1074 | if (unlikely(*p++ != xdr_zero)) |
| 1075 | return -EIO; |
| 1076 | /* never expect Write chunks */ |
| 1077 | if (unlikely(*p++ != xdr_zero)) |
| 1078 | return -EIO; |
| 1079 | /* always expect a Reply chunk */ |
| 1080 | if (unlikely(*p++ == xdr_zero)) |
| 1081 | return -EIO; |
| 1082 | |
| 1083 | rdmalen = rpcrdma_count_chunks(rep, 0, &p); |
| 1084 | if (rdmalen < 0) |
| 1085 | return -EIO; |
| 1086 | r_xprt->rx_stats.total_rdma_reply += rdmalen; |
| 1087 | |
| 1088 | /* Reply chunk buffer already is the reply vector - no fixup. */ |
| 1089 | return rdmalen; |
| 1090 | } |
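/* Summary of the three decode paths: rdma_msg (above) yields the
 * remaining inline bytes plus Write list bytes; rdma_nomsg (this
 * function) yields only the Reply chunk length, since RDMA Write placed
 * the payload directly in the reply buffer; rdma_error (below) maps the
 * peer's report onto -EREMOTEIO.
 */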
| 1091 | |
| 1092 | static noinline int |
| 1093 | rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, |
| 1094 | struct rpc_rqst *rqst) |
| 1095 | { |
| 1096 | struct xdr_stream *xdr = &rep->rr_stream; |
| 1097 | __be32 *p; |
| 1098 | |
| 1099 | p = xdr_inline_decode(xdr, sizeof(*p)); |
| 1100 | if (unlikely(!p)) |
| 1101 | return -EIO; |
| 1102 | |
| 1103 | switch (*p) { |
| 1104 | case err_vers: |
| 1105 | p = xdr_inline_decode(xdr, 2 * sizeof(*p)); |
| 1106 | if (unlikely(!p)) |
| 1107 | break; |
| 1108 | dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n", |
| 1109 | rqst->rq_task->tk_pid, __func__, |
| 1110 | be32_to_cpup(p), be32_to_cpup(p + 1)); |
| 1111 | break; |
| 1112 | case err_chunk: |
| 1113 | dprintk("RPC: %5u: %s: server reports header decoding error\n", |
| 1114 | rqst->rq_task->tk_pid, __func__); |
| 1115 | break; |
| 1116 | default: |
| 1117 | dprintk("RPC: %5u: %s: server reports unrecognized error %u\n", |
| 1118 | rqst->rq_task->tk_pid, __func__, be32_to_cpup(p)); |
| 1119 | } |
| 1120 | |
| 1121 | r_xprt->rx_stats.bad_reply_count++; |
| 1122 | return -EREMOTEIO; |
| 1123 | } |
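/* Assumed wire layout of the ERR_VERS body decoded above, per
 * RPC-over-RDMA version 1 (struct name is illustrative): two words
 * follow the err_vers discriminator.
 */
struct rpcrdma_err_vers_sketch {
	__be32 rdma_vers_low;	/* lowest protocol version supported */
	__be32 rdma_vers_high;	/* highest protocol version supported */
};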
| 1124 | |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 1125 | /* Process received RPC/RDMA messages. |
| 1126 | * |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1127 | * Errors must result in the RPC task either being awakened, or |
| 1128 | * allowed to timeout, to discover the errors at that time. |
| 1129 | */ |
| 1130 | void |
Chuck Lever | 496b77a | 2016-09-15 10:57:57 -0400 | [diff] [blame] | 1131 | rpcrdma_reply_handler(struct work_struct *work) |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1132 | { |
Chuck Lever | 496b77a | 2016-09-15 10:57:57 -0400 | [diff] [blame] | 1133 | struct rpcrdma_rep *rep = |
| 1134 | container_of(work, struct rpcrdma_rep, rr_work); |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1135 | struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; |
| 1136 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
| 1137 | struct rpc_xprt *xprt = &r_xprt->rx_xprt; |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1138 | struct xdr_stream *xdr = &rep->rr_stream; |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1139 | struct rpcrdma_req *req; |
| 1140 | struct rpc_rqst *rqst; |
Chuck Lever | 07ff2dd | 2017-08-03 14:30:19 -0400 | [diff] [blame^] | 1141 | __be32 *p, xid, vers, proc; |
Chuck Lever | e7ce710 | 2014-05-28 10:34:57 -0400 | [diff] [blame] | 1142 | unsigned long cwnd; |
Chuck Lever | 451d26e | 2017-06-08 11:52:04 -0400 | [diff] [blame] | 1143 | struct list_head mws; |
Chuck Lever | 07ff2dd | 2017-08-03 14:30:19 -0400 | [diff] [blame^] | 1144 | int status; |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1145 | |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1146 | dprintk("RPC: %s: incoming rep %p\n", __func__, rep); |
| 1147 | |
| 1148 | if (rep->rr_len == RPCRDMA_BAD_LEN) |
| 1149 | goto out_badstatus; |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1150 | |
| 1151 | xdr_init_decode(xdr, &rep->rr_hdrbuf, |
| 1152 | rep->rr_hdrbuf.head[0].iov_base); |
| 1153 | |
| 1154 | /* Fixed transport header fields */ |
| 1155 | p = xdr_inline_decode(xdr, 4 * sizeof(*p)); |
| 1156 | if (unlikely(!p)) |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1157 | goto out_shortreply; |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1158 | xid = *p++; |
| 1159 | vers = *p++; |
| 1160 | p++; /* credits */ |
| 1161 | proc = *p++; |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1162 | |
Chuck Lever | 41c8f70 | 2017-08-03 14:30:11 -0400 | [diff] [blame] | 1163 | if (rpcrdma_is_bcall(r_xprt, rep, xid, proc)) |
| 1164 | return; |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1165 | |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 1166 | /* Match incoming rpcrdma_rep to an rpcrdma_req to |
| 1167 | * get context for handling any incoming chunks. |
| 1168 | */ |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1169 | spin_lock(&buf->rb_lock); |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1170 | req = rpcrdma_lookup_req_locked(&r_xprt->rx_buf, xid); |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1171 | if (!req) |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1172 | goto out_nomatch; |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1173 | if (req->rl_reply) |
| 1174 | goto out_duplicate; |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1175 | |
Chuck Lever | 451d26e | 2017-06-08 11:52:04 -0400 | [diff] [blame] | 1176 | list_replace_init(&req->rl_registered, &mws); |
| 1177 | rpcrdma_mark_remote_invalidation(&mws, rep); |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1178 | |
| 1179 | /* Avoid races with signals and duplicate replies |
| 1180 | * by marking this req as matched. |
| 1181 | */ |
Chuck Lever | 4b196dc6 | 2017-06-08 11:51:56 -0400 | [diff] [blame] | 1182 | req->rl_reply = rep; |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1183 | spin_unlock(&buf->rb_lock); |
| 1184 | |
Chuck Lever | af0f16e | 2016-03-04 11:27:43 -0500 | [diff] [blame] | 1185 | dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n", |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1186 | __func__, rep, req, be32_to_cpu(xid)); |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1187 | |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1188 | /* Invalidate and unmap the data payloads before waking the |
| 1189 | * waiting application. This guarantees the memory regions |
| 1190 | * are properly fenced from the server before the application |
| 1191 | * accesses the data. It also ensures proper send flow control: |
| 1192 | * waking the next RPC waits until this RPC has relinquished |
| 1193 | * all its Send Queue entries. |
| 1194 | */ |
| 1195 | if (!list_empty(&mws)) |
| 1196 | r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &mws); |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1197 | |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1198 | /* Perform XID lookup, reconstruction of the RPC reply, and |
| 1199 | * RPC completion while holding the transport lock to ensure |
| 1200 | * the rep, rqst, and rq_task pointers remain stable. |
| 1201 | */ |
| 1202 | spin_lock_bh(&xprt->transport_lock); |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1203 | rqst = xprt_lookup_rqst(xprt, xid); |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1204 | if (!rqst) |
| 1205 | goto out_norqst; |
| 1206 | xprt->reestablish_timeout = 0; |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1207 | if (vers != rpcrdma_version) |
Chuck Lever | 59aa1f9 | 2016-03-04 11:28:18 -0500 | [diff] [blame] | 1208 | goto out_badversion; |
| 1209 | |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1210 | switch (proc) { |
Chuck Lever | 284f490 | 2015-01-21 11:02:13 -0500 | [diff] [blame] | 1211 | case rdma_msg: |
Chuck Lever | 07ff2dd | 2017-08-03 14:30:19 -0400 | [diff] [blame^] | 1212 | status = rpcrdma_decode_msg(r_xprt, rep, rqst); |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1213 | break; |
Chuck Lever | 284f490 | 2015-01-21 11:02:13 -0500 | [diff] [blame] | 1214 | case rdma_nomsg: |
Chuck Lever | 07ff2dd | 2017-08-03 14:30:19 -0400 | [diff] [blame^] | 1215 | status = rpcrdma_decode_nomsg(r_xprt, rep); |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1216 | break; |
Chuck Lever | 59aa1f9 | 2016-03-04 11:28:18 -0500 | [diff] [blame] | 1217 | case rdma_error: |
Chuck Lever | 07ff2dd | 2017-08-03 14:30:19 -0400 | [diff] [blame^] | 1218 | status = rpcrdma_decode_error(r_xprt, rep, rqst); |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1219 | break; |
Chuck Lever | 07ff2dd | 2017-08-03 14:30:19 -0400 | [diff] [blame^] | 1220 | default: |
| 1221 | status = -EIO; |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1222 | } |
Chuck Lever | 07ff2dd | 2017-08-03 14:30:19 -0400 | [diff] [blame^] | 1223 | if (status < 0) |
| 1224 | goto out_badheader; |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1225 | |
Chuck Lever | 59aa1f9 | 2016-03-04 11:28:18 -0500 | [diff] [blame] | 1226 | out: |
Chuck Lever | e7ce710 | 2014-05-28 10:34:57 -0400 | [diff] [blame] | 1227 | cwnd = xprt->cwnd; |
Chuck Lever | 23826c7 | 2016-03-04 11:28:27 -0500 | [diff] [blame] | 1228 | xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT; |
Chuck Lever | e7ce710 | 2014-05-28 10:34:57 -0400 | [diff] [blame] | 1229 | if (xprt->cwnd > cwnd) |
| 1230 | xprt_release_rqst_cong(rqst->rq_task); |
| 1231 | |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1232 | xprt_complete_rqst(rqst->rq_task, status); |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 1233 | spin_unlock_bh(&xprt->transport_lock); |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1234 | dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n", |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1235 | __func__, xprt, rqst, status); |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1236 | return; |
| 1237 | |
| 1238 | out_badstatus: |
| 1239 | rpcrdma_recv_buffer_put(rep); |
| 1240 | if (r_xprt->rx_ep.rep_connected == 1) { |
| 1241 | r_xprt->rx_ep.rep_connected = -EIO; |
| 1242 | rpcrdma_conn_func(&r_xprt->rx_ep); |
| 1243 | } |
| 1244 | return; |
| 1245 | |
Chuck Lever | 59aa1f9 | 2016-03-04 11:28:18 -0500 | [diff] [blame] | 1246 | /* If the incoming reply terminated a pending RPC, the next |
| 1247 | * RPC call will post a replacement receive buffer as it is |
| 1248 | * being marshaled. |
| 1249 | */ |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1250 | out_badversion: |
| 1251 | dprintk("RPC: %s: invalid version %u\n", |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1252 | __func__, be32_to_cpu(vers)); |
Chuck Lever | 59aa1f9 | 2016-03-04 11:28:18 -0500 | [diff] [blame] | 1253 | status = -EIO; |
| 1254 | r_xprt->rx_stats.bad_reply_count++; |
| 1255 | goto out; |
| 1256 | |
Chuck Lever | 07ff2dd | 2017-08-03 14:30:19 -0400 | [diff] [blame^] | 1257 | out_badheader: |
| 1258 | dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n", |
| 1259 | rqst->rq_task->tk_pid, __func__, be32_to_cpu(proc)); |
Chuck Lever | 59aa1f9 | 2016-03-04 11:28:18 -0500 | [diff] [blame] | 1260 | r_xprt->rx_stats.bad_reply_count++; |
Chuck Lever | 07ff2dd | 2017-08-03 14:30:19 -0400 | [diff] [blame^] | 1261 | status = -EIO; |
Chuck Lever | 59aa1f9 | 2016-03-04 11:28:18 -0500 | [diff] [blame] | 1262 | goto out; |
| 1263 | |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1264 | /* The req was still available, but by the time the transport_lock |
| 1265 | * was acquired, the rqst and task had been released. Thus the RPC |
| 1266 | * has already been terminated. |
Chuck Lever | 59aa1f9 | 2016-03-04 11:28:18 -0500 | [diff] [blame] | 1267 | */ |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1268 | out_norqst: |
| 1269 | spin_unlock_bh(&xprt->transport_lock); |
| 1270 | rpcrdma_buffer_put(req); |
| 1271 | dprintk("RPC: %s: race, no rqst left for req %p\n", |
| 1272 | __func__, req); |
| 1273 | return; |
| 1274 | |
Chuck Lever | 59aa1f9 | 2016-03-04 11:28:18 -0500 | [diff] [blame] | 1275 | out_shortreply: |
| 1276 | dprintk("RPC: %s: short/invalid reply\n", __func__); |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1277 | goto repost; |
| 1278 | |
| 1279 | out_nomatch: |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1280 | spin_unlock(&buf->rb_lock); |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1281 | dprintk("RPC: %s: no match for incoming xid 0x%08x\n", |
| 1282 | __func__, be32_to_cpu(xid)); |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1283 | goto repost; |
| 1284 | |
| 1285 | out_duplicate: |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1286 | spin_unlock(&buf->rb_lock); |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1287 | dprintk("RPC: %s: " |
| 1288 | "duplicate reply %p to RPC request %p: xid 0x%08x\n", |
Chuck Lever | 96f8778 | 2017-08-03 14:30:03 -0400 | [diff] [blame] | 1289 | __func__, rep, req, be32_to_cpu(xid)); |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1290 | |
Chuck Lever | 431af64 | 2017-06-08 11:52:20 -0400 | [diff] [blame] | 1291 | /* If no pending RPC transaction was matched, post a replacement |
| 1292 | * receive buffer before returning. |
| 1293 | */ |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1294 | repost: |
| 1295 | r_xprt->rx_stats.bad_reply_count++; |
Chuck Lever | b157380 | 2016-09-15 10:56:35 -0400 | [diff] [blame] | 1296 | if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep)) |
Chuck Lever | b0e178a | 2015-10-24 17:26:54 -0400 | [diff] [blame] | 1297 | rpcrdma_recv_buffer_put(rep); |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 1298 | } |