/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY        RPCDBG_SVCXPRT

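/* On the wire, every XDR object is padded out to the next 4-byte
 * boundary. Return the number of pad bytes needed for an object of
 * length "len"; for example, len = 5 yields 3, and len = 8 yields 0.
 */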
static u32 xdr_padsize(u32 len)
{
        return (len & 3) ? (4 - (len & 3)) : 0;
}

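/* Build a vector of kvecs (vec) covering the sendable parts of the
 * reply's xdr_buf: head, page list, and tail. sge[0] is left free for
 * the transport header, which is mapped later by send_reply(). When a
 * Write chunk is present, the XDR pad at the start of the tail is
 * skipped, since the pad for the page data is not sent inline in that
 * case.
 *
 * Returns zero on success, or -EIO if the xdr_buf's component lengths
 * do not add up to xdr->len.
 */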
int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
                     struct xdr_buf *xdr,
                     struct svc_rdma_req_map *vec,
                     bool write_chunk_present)
{
        int sge_no;
        u32 sge_bytes;
        u32 page_bytes;
        u32 page_off;
        int page_no;

        if (xdr->len !=
            (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
                pr_err("svcrdma: %s: XDR buffer length error\n", __func__);
                return -EIO;
        }

        /* Skip the first sge; it is reserved for the RPC-over-RDMA header */
        sge_no = 1;

        /* Head SGE */
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        sge_no++;

        /* pages SGE */
        page_no = 0;
        page_bytes = xdr->page_len;
        page_off = xdr->page_base;
        while (page_bytes) {
                vec->sge[sge_no].iov_base =
                        page_address(xdr->pages[page_no]) + page_off;
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;
                vec->sge[sge_no].iov_len = sge_bytes;

                sge_no++;
                page_no++;
                page_off = 0; /* reset for next time through loop */
        }

        /* Tail SGE */
        if (xdr->tail[0].iov_len) {
                unsigned char *base = xdr->tail[0].iov_base;
                size_t len = xdr->tail[0].iov_len;
                u32 xdr_pad = xdr_padsize(xdr->page_len);

                if (write_chunk_present && xdr_pad) {
                        base += xdr_pad;
                        len -= xdr_pad;
                }

                if (len) {
                        vec->sge[sge_no].iov_base = base;
                        vec->sge[sge_no].iov_len = len;
                        sge_no++;
                }
        }

        dprintk("svcrdma: %s: sge_no %d page_no %d "
                "page_base %u page_len %u head_len %zu tail_len %zu\n",
                __func__, sge_no, page_no, xdr->page_base, xdr->page_len,
                xdr->head[0].iov_len, xdr->tail[0].iov_len);

        vec->count = sge_no;
        return 0;
}

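/* DMA-map the portion of the already-marshaled reply (rq_res) that
 * begins at "xdr_off", whether that offset falls in the head, the
 * page list, or the tail. At most one page is mapped per call; the
 * caller loops to cover longer ranges. Returns the mapped DMA
 * address, which must be checked with ib_dma_mapping_error().
 */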
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
                              struct xdr_buf *xdr,
                              u32 xdr_off, size_t len, int dir)
{
        struct page *page;
        dma_addr_t dma_addr;

        if (xdr_off < xdr->head[0].iov_len) {
                /* This offset is in the head */
                xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
                page = virt_to_page(xdr->head[0].iov_base);
        } else {
                xdr_off -= xdr->head[0].iov_len;
                if (xdr_off < xdr->page_len) {
                        /* This offset is in the page list */
                        xdr_off += xdr->page_base;
                        page = xdr->pages[xdr_off >> PAGE_SHIFT];
                        xdr_off &= ~PAGE_MASK;
                } else {
                        /* This offset is in the tail */
                        xdr_off -= xdr->page_len;
                        xdr_off += (unsigned long)
                                xdr->tail[0].iov_base & ~PAGE_MASK;
                        page = virt_to_page(xdr->tail[0].iov_base);
                }
        }
        dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
                                   min_t(size_t, PAGE_SIZE, len), dir);
        return dma_addr;
}

/* Parse the RPC Call's transport header to locate its Write list and
 * Reply chunk. The header carries three chunk lists, each introduced
 * by a one-word discriminator. A Read segment occupies five XDR words
 * (position, handle, length, and a two-word offset); a Write array
 * holds a chunk count followed by that many four-word segments
 * (handle, length, and a two-word offset).
 *
 * *write and *reply are set to NULL when the corresponding list is
 * not present in the header.
 */
static void svc_rdma_get_write_arrays(struct rpcrdma_msg *rmsgp,
                                      struct rpcrdma_write_array **write,
                                      struct rpcrdma_write_array **reply)
{
        __be32 *p;

        p = (__be32 *)&rmsgp->rm_body.rm_chunks[0];

        /* Read list: skip five words per read segment */
        while (*p++ != xdr_zero)
                p += 5;

        /* Write list: skip the chunk count, then four words per segment */
        if (*p != xdr_zero) {
                *write = (struct rpcrdma_write_array *)p;
                while (*p++ != xdr_zero)
                        p += 1 + be32_to_cpu(*p) * 4;
        } else {
                *write = NULL;
                p++;
        }

        /* Reply chunk */
        if (*p != xdr_zero)
                *reply = (struct rpcrdma_write_array *)p;
        else
                *reply = NULL;
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one rkey to invalidate.
 *
 * Find a candidate rkey to invalidate when sending a reply. Picks the
 * first rkey it finds in the chunk lists.
 *
 * Returns zero if the RPC's chunk lists are empty.
 */
static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp,
                                 struct rpcrdma_write_array *wr_ary,
                                 struct rpcrdma_write_array *rp_ary)
{
        struct rpcrdma_read_chunk *rd_ary;
        struct rpcrdma_segment *arg_ch;

        rd_ary = (struct rpcrdma_read_chunk *)&rdma_argp->rm_body.rm_chunks[0];
        if (rd_ary->rc_discrim != xdr_zero)
                return be32_to_cpu(rd_ary->rc_target.rs_handle);

        if (wr_ary && be32_to_cpu(wr_ary->wc_nchunks)) {
                arg_ch = &wr_ary->wc_array[0].wc_target;
                return be32_to_cpu(arg_ch->rs_handle);
        }

        if (rp_ary && be32_to_cpu(rp_ary->wc_nchunks)) {
                arg_ch = &rp_ary->wc_array[0].wc_target;
                return be32_to_cpu(arg_ch->rs_handle);
        }

        return 0;
}

/* Post an RDMA Write to transfer "write_len" bytes of the response,
 * starting at "xdr_off" in rq_res, to the client memory region named
 * by rkey "rmr" at remote address "to".
 *
 * Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 *
 * Returns the number of bytes actually posted, which may be less than
 * write_len when sc_max_sge is reached, or -EIO on error.
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                      u32 rmr, u64 to,
                      u32 xdr_off, int write_len,
                      struct svc_rdma_req_map *vec)
{
        struct ib_rdma_wr write_wr;
        struct ib_sge *sge;
        int xdr_sge_no;
        int sge_no;
        int sge_bytes;
        int sge_off;
        int bc;
        struct svc_rdma_op_ctxt *ctxt;

        if (vec->count > RPCSVC_MAXPAGES) {
                pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
                return -EIO;
        }

        dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
                "write_len=%d, vec->sge=%p, vec->count=%lu\n",
                rmr, (unsigned long long)to, xdr_off,
                write_len, vec->sge, vec->count);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_TO_DEVICE;
        sge = ctxt->sge;

        /* Find the SGE associated with xdr_off */
        for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
             xdr_sge_no++) {
                if (vec->sge[xdr_sge_no].iov_len > bc)
                        break;
                bc -= vec->sge[xdr_sge_no].iov_len;
        }

        sge_off = bc;
        bc = write_len;
        sge_no = 0;

        /* Copy the remaining SGE */
        while (bc != 0) {
                sge_bytes = min_t(size_t,
                          bc, vec->sge[xdr_sge_no].iov_len - sge_off);
                sge[sge_no].length = sge_bytes;
                sge[sge_no].addr =
                        dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
                                    sge_bytes, DMA_TO_DEVICE);
                xdr_off += sge_bytes;
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         sge[sge_no].addr))
                        goto err;
                svc_rdma_count_mappings(xprt, ctxt);
                sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
                ctxt->count++;
                sge_off = 0;
                sge_no++;
                xdr_sge_no++;
                if (xdr_sge_no > vec->count) {
                        pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
                        goto err;
                }
                bc -= sge_bytes;
                if (sge_no == xprt->sc_max_sge)
                        break;
        }

        /* Prepare WRITE WR */
        memset(&write_wr, 0, sizeof(write_wr));
        ctxt->cqe.done = svc_rdma_wc_write;
        write_wr.wr.wr_cqe = &ctxt->cqe;
        write_wr.wr.sg_list = &sge[0];
        write_wr.wr.num_sge = sge_no;
        write_wr.wr.opcode = IB_WR_RDMA_WRITE;
        write_wr.wr.send_flags = IB_SEND_SIGNALED;
        write_wr.rkey = rmr;
        write_wr.remote_addr = to;

        /* Post It */
        atomic_inc(&rdma_stat_write);
        if (svc_rdma_send(xprt, &write_wr.wr))
                goto err;
        return write_len - bc;
 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 0);
        return -EIO;
}

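/* Transfer the response's page list to the client via the Write
 * chunks it provided. One chunk may need several calls to
 * send_write(), since each RDMA Write is limited to sc_max_sge
 * segments. The response's Write list is encoded with the lengths
 * actually written.
 *
 * Returns rqstp->rq_res.page_len on success, or -EIO if any RDMA
 * Write fails.
 */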
noinline
static int send_write_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_write_array *wr_ary,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.page_len;
        int write_len;
        u32 xdr_off;
        int chunk_off;
        int chunk_no;
        int nchunks;
        struct rpcrdma_write_array *res_ary;
        int ret;

        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[1];

        /* Write chunks start at the pagelist */
        nchunks = be32_to_cpu(wr_ary->wc_nchunks);
        for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
             xfer_len && chunk_no < nchunks;
             chunk_no++) {
                struct rpcrdma_segment *arg_ch;
                u64 rs_offset;

                arg_ch = &wr_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));

                /* Prepare the response chunk given the length actually
                 * written */
                xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                                arg_ch->rs_handle,
                                                arg_ch->rs_offset,
                                                write_len);
                chunk_off = 0;
                while (write_len) {
                        ret = send_write(xprt, rqstp,
                                         be32_to_cpu(arg_ch->rs_handle),
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         write_len,
                                         vec);
                        if (ret <= 0)
                                goto out_err;
                        chunk_off += ret;
                        xdr_off += ret;
                        xfer_len -= ret;
                        write_len -= ret;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

        return rqstp->rq_res.page_len;

out_err:
        pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret);
        return -EIO;
}

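/* Transfer the whole response message to the client via the Reply
 * chunk it provided, again using as many RDMA Writes per chunk as
 * needed. The response's Reply chunk is encoded with the lengths
 * actually written.
 *
 * Returns rqstp->rq_res.len on success, or -EIO if any RDMA Write
 * fails.
 */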
noinline
static int send_reply_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_write_array *rp_ary,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.len;
        int write_len;
        u32 xdr_off;
        int chunk_no;
        int chunk_off;
        int nchunks;
        struct rpcrdma_segment *ch;
        struct rpcrdma_write_array *res_ary;
        int ret;

        /* XXX: need to fix when reply lists occur with read-list and or
         * write-list */
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[2];

        /* xdr offset starts at RPC message */
        nchunks = be32_to_cpu(rp_ary->wc_nchunks);
        for (xdr_off = 0, chunk_no = 0;
             xfer_len && chunk_no < nchunks;
             chunk_no++) {
                u64 rs_offset;

                ch = &rp_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, be32_to_cpu(ch->rs_length));

                /* Prepare the reply chunk given the length actually
                 * written */
                xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                                ch->rs_handle, ch->rs_offset,
                                                write_len);
                chunk_off = 0;
                while (write_len) {
                        ret = send_write(xprt, rqstp,
                                         be32_to_cpu(ch->rs_handle),
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         write_len,
                                         vec);
                        if (ret <= 0)
                                goto out_err;
                        chunk_off += ret;
                        xdr_off += ret;
                        xfer_len -= ret;
                        write_len -= ret;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

        return rqstp->rq_res.len;

out_err:
        pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret);
        return -EIO;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function maps the RPCRDMA header, already
 * encoded in 'rdma_resp', into sge[0], and the 'byte_count' parameter
 * indicates how much of the XDR to include in this RDMA_SEND. NB: The
 * offset of the payload to send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
                      struct svc_rqst *rqstp,
                      struct page *page,
                      struct rpcrdma_msg *rdma_resp,
                      struct svc_rdma_req_map *vec,
                      int byte_count,
                      u32 inv_rkey)
{
        struct svc_rdma_op_ctxt *ctxt;
        struct ib_send_wr send_wr;
        u32 xdr_off;
        int sge_no;
        int sge_bytes;
        int page_no;
        int pages;
        int ret = -EIO;

        /* Prepare the context */
        ctxt = svc_rdma_get_context(rdma);
        ctxt->direction = DMA_TO_DEVICE;
        ctxt->pages[0] = page;
        ctxt->count = 1;

        /* Prepare the SGE for the RPCRDMA Header */
        ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
        ctxt->sge[0].length =
            svc_rdma_xdr_get_reply_hdr_len((__be32 *)rdma_resp);
        ctxt->sge[0].addr =
            ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
                            ctxt->sge[0].length, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
                goto err;
        svc_rdma_count_mappings(rdma, ctxt);

        /* Map the payload indicated by 'byte_count' */
        xdr_off = 0;
        for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
                sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
                byte_count -= sge_bytes;
                ctxt->sge[sge_no].addr =
                        dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
                                    sge_bytes, DMA_TO_DEVICE);
                xdr_off += sge_bytes;
                if (ib_dma_mapping_error(rdma->sc_cm_id->device,
                                         ctxt->sge[sge_no].addr))
                        goto err;
                svc_rdma_count_mappings(rdma, ctxt);
                ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
                ctxt->sge[sge_no].length = sge_bytes;
        }
        if (byte_count != 0) {
                pr_err("svcrdma: Could not map %d bytes\n", byte_count);
                goto err;
        }

        /* Save all respages in the ctxt and remove them from the
         * respages array. They are our pages until the I/O
         * completes.
         */
        pages = rqstp->rq_next_page - rqstp->rq_respages;
        for (page_no = 0; page_no < pages; page_no++) {
                ctxt->pages[page_no + 1] = rqstp->rq_respages[page_no];
                ctxt->count++;
                rqstp->rq_respages[page_no] = NULL;
        }
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        if (sge_no > rdma->sc_max_sge) {
                pr_err("svcrdma: Too many sges (%d)\n", sge_no);
                goto err;
        }
        memset(&send_wr, 0, sizeof(send_wr));
        ctxt->cqe.done = svc_rdma_wc_send;
        send_wr.wr_cqe = &ctxt->cqe;
        send_wr.sg_list = ctxt->sge;
        send_wr.num_sge = sge_no;
        if (inv_rkey) {
                send_wr.opcode = IB_WR_SEND_WITH_INV;
                send_wr.ex.invalidate_rkey = inv_rkey;
        } else {
                send_wr.opcode = IB_WR_SEND;
        }
        send_wr.send_flags = IB_SEND_SIGNALED;

        ret = svc_rdma_send(rdma, &send_wr);
        if (ret)
                goto err;

        return 0;

 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        return ret;
}

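/* Intentionally a no-op: the RPC-over-RDMA transport header is built
 * in svc_rdma_sendto() after the RPC reply has been marshaled, so
 * this transport method has nothing to prepend here.
 */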
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

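/* Send the RPC reply in rqstp->rq_res back to the client. Chunk data
 * goes first via RDMA Write; the transport header and any remaining
 * inline bytes then go via a single Send. A fresh Receive buffer is
 * posted before the Send to catch the client's next request. On any
 * failure the transport is flagged XPT_CLOSE and -ENOTCONN is
 * returned.
 */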
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct rpcrdma_msg *rdma_argp;
        struct rpcrdma_msg *rdma_resp;
        struct rpcrdma_write_array *wr_ary, *rp_ary;
        int ret;
        int inline_bytes;
        struct page *res_page;
        struct svc_rdma_req_map *vec;
        u32 inv_rkey;
        __be32 *p;

        dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

        /* Get the RDMA request header. The receive logic always
         * places this at the start of page 0.
         */
        rdma_argp = page_address(rqstp->rq_pages[0]);
        svc_rdma_get_write_arrays(rdma_argp, &wr_ary, &rp_ary);

        inv_rkey = 0;
        if (rdma->sc_snd_w_inv)
                inv_rkey = svc_rdma_get_inv_rkey(rdma_argp, wr_ary, rp_ary);

        /* Build a req vec for the XDR */
        vec = svc_rdma_get_req_map(rdma);
        ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
        if (ret)
                goto err0;
        inline_bytes = rqstp->rq_res.len;

        /* Create the RDMA response header. xprt->xpt_mutex,
         * acquired in svc_send(), serializes RPC replies. The
         * code path below that inserts the credit grant value
         * into each transport header runs only inside this
         * critical section.
         */
        ret = -ENOMEM;
        res_page = alloc_page(GFP_KERNEL);
        if (!res_page)
                goto err0;
        rdma_resp = page_address(res_page);

        p = &rdma_resp->rm_xid;
        *p++ = rdma_argp->rm_xid;
        *p++ = rdma_argp->rm_vers;
        *p++ = rdma->sc_fc_credits;
        *p++ = rp_ary ? rdma_nomsg : rdma_msg;

        /* Start with empty chunks */
        *p++ = xdr_zero;
        *p++ = xdr_zero;
        *p = xdr_zero;

        /* Send any write-chunk data and build resp write-list */
        if (wr_ary) {
                ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec);
                if (ret < 0)
                        goto err1;
                inline_bytes -= ret + xdr_padsize(ret);
        }

        /* Send any reply-list data and update resp reply-list */
        if (rp_ary) {
                ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec);
                if (ret < 0)
                        goto err1;
                inline_bytes -= ret;
        }

        /* Post a fresh Receive buffer _before_ sending the reply */
        ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
        if (ret)
                goto err1;

        ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
                         inline_bytes, inv_rkey);
        if (ret < 0)
                goto err0;

        svc_rdma_put_req_map(rdma, vec);
        dprintk("svcrdma: send_reply returns %d\n", ret);
        return ret;

 err1:
        put_page(res_page);
 err0:
        svc_rdma_put_req_map(rdma, vec);
        pr_err("svcrdma: Could not send reply, err=%d. Closing transport.\n",
               ret);
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
        return -ENOTCONN;
}

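/* Send a transport-level error reply (ERR_VERS or ERR_CHUNK) to the
 * client. A Receive buffer is reposted first. Failures here are only
 * logged, since there is no caller to which to report them.
 */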
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                         int status)
{
        struct ib_send_wr err_wr;
        struct page *p;
        struct svc_rdma_op_ctxt *ctxt;
        enum rpcrdma_errcode err;
        __be32 *va;
        int length;
        int ret;

        ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
        if (ret)
                return;

        p = alloc_page(GFP_KERNEL);
        if (!p)
                return;
        va = page_address(p);

        /* XDR encode an error reply */
        err = ERR_CHUNK;
        if (status == -EPROTONOSUPPORT)
                err = ERR_VERS;
        length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_TO_DEVICE;
        ctxt->count = 1;
        ctxt->pages[0] = p;

        /* Prepare SGE for local address */
        ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
        ctxt->sge[0].length = length;
        ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
                                            p, 0, length, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
                dprintk("svcrdma: Error mapping buffer for protocol error\n");
                svc_rdma_put_context(ctxt, 1);
                return;
        }
        svc_rdma_count_mappings(xprt, ctxt);

        /* Prepare SEND WR */
        memset(&err_wr, 0, sizeof(err_wr));
        ctxt->cqe.done = svc_rdma_wc_send;
        err_wr.wr_cqe = &ctxt->cqe;
        err_wr.sg_list = ctxt->sge;
        err_wr.num_sge = 1;
        err_wr.opcode = IB_WR_SEND;
        err_wr.send_flags = IB_SEND_SIGNALED;

        /* Post It */
        ret = svc_rdma_send(xprt, &err_wr);
        if (ret) {
                dprintk("svcrdma: Error %d posting send for protocol error\n",
                        ret);
                svc_rdma_unmap_dma(ctxt);
                svc_rdma_put_context(ctxt, 1);
        }
}