\"Talpey, Thomas\ | f58851e | 2007-09-10 13:50:12 -0400 | [diff] [blame] | 1 | /* |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 2 | * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. |
| 3 | * |
| 4 | * This software is available to you under a choice of one of two |
| 5 | * licenses. You may choose to be licensed under the terms of the GNU |
| 6 | * General Public License (GPL) Version 2, available from the file |
| 7 | * COPYING in the main directory of this source tree, or the BSD-type |
| 8 | * license below: |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or without |
| 11 | * modification, are permitted provided that the following conditions |
| 12 | * are met: |
| 13 | * |
| 14 | * Redistributions of source code must retain the above copyright |
| 15 | * notice, this list of conditions and the following disclaimer. |
| 16 | * |
| 17 | * Redistributions in binary form must reproduce the above |
| 18 | * copyright notice, this list of conditions and the following |
| 19 | * disclaimer in the documentation and/or other materials provided |
| 20 | * with the distribution. |
| 21 | * |
| 22 | * Neither the name of the Network Appliance, Inc. nor the names of |
| 23 | * its contributors may be used to endorse or promote products |
| 24 | * derived from this software without specific prior written |
| 25 | * permission. |
| 26 | * |
| 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 31 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 32 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 33 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 34 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 35 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 36 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 37 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 38 | */ |
| 39 | |
| 40 | /* |
| 41 | * rpc_rdma.c |
| 42 | * |
| 43 | * This file contains the guts of the RPC RDMA protocol, and |
| 44 | * does marshaling/unmarshaling, etc. It is also where interfacing |
| 45 | * to the Linux RPC framework lives. |
\"Talpey, Thomas\ | f58851e | 2007-09-10 13:50:12 -0400 | [diff] [blame] | 46 | */ |
| 47 | |
| 48 | #include "xprt_rdma.h" |
| 49 | |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 50 | #include <linux/highmem.h> |
| 51 | |
Jeff Layton | f895b25 | 2014-11-17 16:58:04 -0500 | [diff] [blame] | 52 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 53 | # define RPCDBG_FACILITY RPCDBG_TRANS |
| 54 | #endif |
| 55 | |
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

static const char transfertypes[][12] = {
	"inline",	/* no chunks */
	"read list",	/* some argument via rdma read */
	"*read list",	/* entire request via rdma read */
	"write list",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += maxsegs * sizeof(struct rpcrdma_read_chunk);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}
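
/* A quick worked example, as a sanity check: assuming the common
 * definitions (RPCRDMA_HDRLEN_MIN is 28 octets, struct rpcrdma_segment
 * is 16 octets, and struct rpcrdma_read_chunk is 24 octets), a caller
 * passing maxsegs = 8 gets 28 + (8 + 2) * 24 + (4 + 16 + 4) = 292
 * octets as the worst-case Call header size.
 */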

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += sizeof(__be32);	/* segment count */
	size += maxsegs * sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}
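
/* Likewise, assuming the sizes above, maxsegs = 8 yields a worst-case
 * Reply header of 28 + 4 + (8 + 2) * 16 + 4 = 196 octets.
 */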

void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *ia,
				  struct rpcrdma_create_data_internal *cdata,
				  unsigned int maxsegs)
{
	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}
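
/* With 1024-octet inline thresholds (an assumption; the actual values
 * come from connection setup and the module parameters), the figures
 * above would leave roughly 732 octets of inline send space and 828
 * octets of inline receive space for RPC payloads.
 */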
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 130 | |
Chuck Lever | 5457ced | 2015-08-03 13:03:49 -0400 | [diff] [blame] | 131 | /* The client can send a request inline as long as the RPCRDMA header |
| 132 | * plus the RPC call fit under the transport's inline limit. If the |
| 133 | * combined call message size exceeds that limit, the client must use |
| 134 | * the read chunk list for this operation. |
| 135 | */ |
Chuck Lever | 302d3de | 2016-05-02 14:41:05 -0400 | [diff] [blame] | 136 | static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, |
| 137 | struct rpc_rqst *rqst) |
Chuck Lever | 5457ced | 2015-08-03 13:03:49 -0400 | [diff] [blame] | 138 | { |
Chuck Lever | 302d3de | 2016-05-02 14:41:05 -0400 | [diff] [blame] | 139 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
Chuck Lever | 5457ced | 2015-08-03 13:03:49 -0400 | [diff] [blame] | 140 | |
Chuck Lever | 302d3de | 2016-05-02 14:41:05 -0400 | [diff] [blame] | 141 | return rqst->rq_snd_buf.len <= ia->ri_max_inline_write; |
Chuck Lever | 5457ced | 2015-08-03 13:03:49 -0400 | [diff] [blame] | 142 | } |
| 143 | |
| 144 | /* The client can't know how large the actual reply will be. Thus it |
| 145 | * plans for the largest possible reply for that particular ULP |
| 146 | * operation. If the maximum combined reply message size exceeds that |
| 147 | * limit, the client must provide a write list or a reply chunk for |
| 148 | * this request. |
| 149 | */ |
Chuck Lever | 302d3de | 2016-05-02 14:41:05 -0400 | [diff] [blame] | 150 | static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt, |
| 151 | struct rpc_rqst *rqst) |
Chuck Lever | 5457ced | 2015-08-03 13:03:49 -0400 | [diff] [blame] | 152 | { |
Chuck Lever | 302d3de | 2016-05-02 14:41:05 -0400 | [diff] [blame] | 153 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
Chuck Lever | 5457ced | 2015-08-03 13:03:49 -0400 | [diff] [blame] | 154 | |
Chuck Lever | 302d3de | 2016-05-02 14:41:05 -0400 | [diff] [blame] | 155 | return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read; |
Chuck Lever | 5457ced | 2015-08-03 13:03:49 -0400 | [diff] [blame] | 156 | } |
| 157 | |
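/* Pull the tail of an outgoing xdr_buf forward so that its content
 * lands immediately after the head, dropping any leading XDR pad
 * inserted by xdr_write_pages(). For example, with the usual
 * four-octet XDR alignment, a 7-octet tail whose first 3 octets are
 * pad is shifted so that only its 4 octets of real content follow
 * the head. Returns the number of tail octets to be transmitted.
 */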
static int
rpcrdma_tail_pullup(struct xdr_buf *buf)
{
	size_t tlen = buf->tail[0].iov_len;
	size_t skip = tlen & 3;

	/* Do not include the tail if it is only an XDR pad */
	if (tlen < 4)
		return 0;

	/* xdr_write_pages() adds a pad at the beginning of the tail
	 * if the content in "buf->pages" is unaligned. Force the
	 * tail's actual content to land at the next XDR position
	 * after the head instead.
	 */
	if (skip) {
		unsigned char *src, *dst;
		unsigned int count;

		src = buf->tail[0].iov_base;
		dst = buf->head[0].iov_base;
		dst += buf->head[0].iov_len;

		src += skip;
		tlen -= skip;

		dprintk("RPC: %s: skip=%zu, memmove(%p, %p, %zu)\n",
			__func__, skip, dst, src, tlen);

		for (count = tlen; count; count--)
			*dst++ = *src++;
	}

	return tlen;
}

/* Split "vec" on page boundaries into segments. FMR registers pages,
 * not a byte range. Other modes coalesce these segments into a single
 * MR when they can.
 */
static int
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
{
	size_t page_offset;
	u32 remaining;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining && n < RPCRDMA_MAX_SEGS) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg[n].mr_len;
		base += seg[n].mr_len;
		++n;
		page_offset = 0;
	}
	return n;
}
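
/* For instance, with 4KB pages, a 6000-byte kvec that starts 100
 * bytes into a page becomes two segments: one of 3996 bytes and one
 * of 2004 bytes, the second starting at offset 0 of the next page.
 */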

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg)
{
	int len, n, p, page_base;
	struct page **ppages;

	n = 0;
	if (pos == 0) {
		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < RPCRDMA_MAX_SEGS) {
		if (!ppages[p]) {
			/* Allocate the page list for a receive buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -EAGAIN;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			goto out_overflow;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == RPCRDMA_MAX_SEGS)
		goto out_overflow;

	/* When encoding the read list, the tail is always sent inline */
	if (type == rpcrdma_readch)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
		if (n == RPCRDMA_MAX_SEGS)
			goto out_overflow;
	}

	return n;

out_overflow:
	pr_err("rpcrdma: segment array overflow\n");
	return -EIO;
}

static inline __be32 *
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
	*iptr++ = cpu_to_be32(mw->mw_handle);
	*iptr++ = cpu_to_be32(mw->mw_length);
	return xdr_encode_hyper(iptr, mw->mw_offset);
}
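
/* Each segment thus occupies four XDR words on the wire. For
 * example, handle 0x0000002a, length 4096, offset 0x10000 encodes as
 * 0x0000002a 0x00001000 0x00000000 0x00010000 (the HLOO quad used in
 * the encoding keys below).
 */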

/* XDR-encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Read list, or an error pointer.
 */
static __be32 *
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_req *req, struct rpc_rqst *rqst,
			 __be32 *iptr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int pos;
	int n, nsegs;

	if (rtype == rpcrdma_noch) {
		*iptr++ = xdr_zero;	/* item not present */
		return iptr;
	}

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 false, &mw);
		if (n < 0)
			return ERR_PTR(n);
		list_add(&mw->mw_list, &req->rl_registered);

		*iptr++ = xdr_one;	/* item present */

		/* All read segments in this chunk
		 * have the same "position".
		 */
		*iptr++ = cpu_to_be32(pos);
		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__, pos,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.read_chunk_count++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Finish Read list */
	*iptr++ = xdr_zero;	/* Next item not present */
	return iptr;
}

/* XDR-encode the Write list. Supports encoding a list containing
 * one array of plain segments that belong to a single write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Write list, or an error pointer.
 */
static __be32 *
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, __be32 *iptr,
			  enum rpcrdma_chunktype wtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech) {
		*iptr++ = xdr_zero;	/* no Write list present */
		return iptr;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	*iptr++ = xdr_one;	/* Write list present */
	segcount = iptr++;	/* save location of segment count */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return ERR_PTR(n);
		list_add(&mw->mw_list, &req->rl_registered);

		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	/* Finish Write list */
	*iptr++ = xdr_zero;	/* Next item not present */
	return iptr;
}

/* XDR-encode the Reply chunk. Supports encoding an array of plain
 * segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns a pointer to the XDR word in the RDMA header following
 * the end of the Reply chunk, or an error pointer.
 */
static __be32 *
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
			   struct rpcrdma_req *req, struct rpc_rqst *rqst,
			   __be32 *iptr, enum rpcrdma_chunktype wtype)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int n, nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych) {
		*iptr++ = xdr_zero;	/* no Reply chunk present */
		return iptr;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return ERR_PTR(nsegs);

	*iptr++ = xdr_one;	/* Reply chunk present */
	segcount = iptr++;	/* save location of segment count */

	nchunks = 0;
	do {
		n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						 true, &mw);
		if (n < 0)
			return ERR_PTR(n);
		list_add(&mw->mw_list, &req->rl_registered);

		iptr = xdr_encode_rdma_segment(iptr, mw);

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, n < nsegs ? "more" : "last");

		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return iptr;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;

	dprintk("RPC: %s: destp 0x%p len %d hdrlen %d\n",
		__func__, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base + copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp + page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Prepares up to two IOVs per Call message:
 *
 *  [0] -- RPC RDMA header
 *  [1] -- the RPC header/data
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;
	bool ddp_allowed;
	ssize_t hdrlen;
	size_t rpclen;
	__be32 *iptr;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer never presents a request
	 * that both carries a data payload and has non-data arguments
	 * that by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		rtype = rpcrdma_noch;
		rpcrdma_inline_pullup(rqst);
		rpclen = rqst->rq_svec[0].iov_len;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
		rpclen = rqst->rq_svec[0].iov_len;
		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = htonl(RDMA_NOMSG);
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	iptr = headerp->rm_body.rm_chunks;
	iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
	if (IS_ERR(iptr))
		goto out_unmap;
	iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
	if (IS_ERR(iptr))
		goto out_unmap;
	iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
	if (IS_ERR(iptr))
		goto out_unmap;
	hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;

	if (hdrlen + rpclen > r_xprt->rx_data.inline_wsize)
		goto out_overflow;

	dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
		rqst->rq_task->tk_pid, __func__,
		transfertypes[rtype], transfertypes[wtype],
		hdrlen, rpclen);

	if (!rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, req->rl_rdmabuf))
		goto out_map;
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_send_wr.num_sge = 1;
	if (rtype == rpcrdma_areadch)
		return 0;

	if (!rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, req->rl_sendbuf))
		goto out_map;
	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_send_wr.num_sge = 2;

	return 0;

out_overflow:
	pr_err("rpcrdma: send overflow: hdrlen %zd rpclen %zu %s/%s\n",
	       hdrlen, rpclen, transfertypes[rtype], transfertypes[wtype]);
	iptr = ERR_PTR(-EIO);

out_unmap:
	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
	return PTR_ERR(iptr);

out_map:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	iptr = ERR_PTR(-EIO);
	goto out_unmap;
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See the encoding key at rpcrdma_encode_write_list()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpcrdma_xprt *r_xprt =
		container_of(ep, struct rpcrdma_xprt, rx_ep);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
{
	__be32 *p = (__be32 *)headerp;

	if (headerp->rm_type != rdma_msg)
		return false;
	if (headerp->rm_body.rm_chunks[0] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[1] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[2] != xdr_zero)
		return false;

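	/* A header that passes the checks above is exactly seven XDR
	 * words long (rm_xid, rm_vers, rm_credit, rm_type, and three
	 * empty chunk lists), so p[7] is the first word of the
	 * embedded RPC header (its XID) and p[8] is the RPC call
	 * direction.
	 */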
	/* sanity */
	if (p[7] != headerp->rm_xid)
		return false;
	/* call direction */
	if (p[8] != cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
| 910 | |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 911 | /* |
Chuck Lever | 254f91e | 2014-05-28 10:32:17 -0400 | [diff] [blame] | 912 | * This function is called when an async event is posted to |
| 913 | * the connection which changes the connection state. All it |
| 914 | * does at this point is mark the connection up/down, the rpc |
| 915 | * timers do the rest. |
| 916 | */ |
| 917 | void |
| 918 | rpcrdma_conn_func(struct rpcrdma_ep *ep) |
| 919 | { |
| 920 | schedule_delayed_work(&ep->rep_connect_worker, 0); |
| 921 | } |
| 922 | |
Chuck Lever | fe97b47 | 2015-10-24 17:27:10 -0400 | [diff] [blame] | 923 | /* Process received RPC/RDMA messages. |
| 924 | * |
\"Talpey, Thomas\ | e960182 | 2007-09-10 13:50:42 -0400 | [diff] [blame] | 925 | * Errors must result in the RPC task either being awakened, or |
| 926 | * allowed to timeout, to discover the errors at that time. |
| 927 | */ |
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	__be32 *iptr;
	int rdmalen, status, rmerr;
	unsigned long cwnd;

	dprintk("RPC: %s: incoming rep %p\n", __func__, rep);

	if (rep->rr_len == RPCRDMA_BAD_LEN)
		goto out_badstatus;
	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		goto out_shortreply;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (rpcrdma_is_bcall(headerp))
		goto out_bcall;
#endif

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
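	/* Both the XID lookup and the removal of the rqst from the
	 * pending list below rely on the caller holding
	 * xprt->transport_lock.
	 */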
	spin_lock_bh(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (!rqst)
		goto out_nomatch;

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply)
		goto out_duplicate;

	/* Sanity checking has passed. We are now committed
	 * to complete this transaction.
	 */
	list_del_init(&rqst->rq_list);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	if (headerp->rm_vers != rpcrdma_version)
		goto out_badversion;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
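		/* That is: reject the reply if a read list is present,
		 * if a reply chunk follows an absent write list, or if
		 * a write list arrived but no memory was registered
		 * for this request.
		 */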
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     list_empty(&req->rl_registered)))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
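			/* e.g. a 5-byte final chunk occupies 8 XDR-padded
			 * bytes on the wire; add back the 4 - (5 & 3) = 3
			 * pad bytes the sender was allowed to omit.
			 */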
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
					  RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}

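		/* Copy the reply payload out of the receive buffer and
		 * into the RPC request's receive xdr_buf, counting the
		 * bytes that had to be moved.
		 */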
		r_xprt->rx_stats.fixup_copy_count +=
			rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len,
					     rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    list_empty(&req->rl_registered))
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
				  RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

	case rdma_error:
		goto out_rdmaerr;

badheader:
	default:
		dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
			rqst->rq_task->tk_pid, __func__,
			be32_to_cpu(headerp->rm_type));
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

out:
	/* Invalidate and flush the data payloads before waking the
	 * waiting application. This guarantees the memory region is
	 * properly fenced from the server before the application
	 * accesses the data. It also ensures proper send flow
	 * control: waking the next RPC waits until this RPC has
	 * relinquished all its Send Queue entries.
	 */
	if (!list_empty(&req->rl_registered))
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);

	spin_lock_bh(&xprt->transport_lock);
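	/* Convert the server-granted credit count into an RPC
	 * congestion window: each credit is worth RPC_CWNDSCALE
	 * (1 << RPC_CWNDSHIFT) cwnd units.
	 */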
	cwnd = xprt->cwnd;
	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
out_bcall:
	rpcrdma_bc_receive_call(r_xprt, rep);
	return;
#endif

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badversion:
	dprintk("RPC: %s: invalid version %d\n",
		__func__, be32_to_cpu(headerp->rm_vers));
	status = -EIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

out_rdmaerr:
	rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
	switch (rmerr) {
	case ERR_VERS:
		pr_err("%s: server reports header version error (%u-%u)\n",
		       __func__,
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
		break;
	case ERR_CHUNK:
		pr_err("%s: server reports header decoding error\n",
		       __func__);
		break;
	default:
		pr_err("%s: server reports unknown error %d\n",
		       __func__, rmerr);
	}
	status = -EREMOTEIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
out_shortreply:
	dprintk("RPC: %s: short/invalid reply\n", __func__);
	goto repost;

out_nomatch:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
		__func__, be32_to_cpu(headerp->rm_xid),
		rep->rr_len);
	goto repost;

out_duplicate:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: duplicate reply %p to RPC request %p: xid 0x%08x\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		rpcrdma_recv_buffer_put(rep);
}