/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

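/* Returns the number of pad bytes needed to bring "len" up to the
 * next XDR 4-byte boundary (zero if "len" is already aligned).
 */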
static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}

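/* Build a per-request map ("vec") of the head, pagelist, and tail of
 * the xdr_buf to be sent. vec->sge[0] is left unused; it corresponds
 * to the RPC/RDMA header. Returns 0 on success, or -EIO if the
 * xdr_buf section lengths do not add up to xdr->len.
 */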
int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
		     struct xdr_buf *xdr,
		     struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	if (xdr->len !=
	    (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
		pr_err("svcrdma: %s: XDR buffer length error\n", __func__);
		return -EIO;
	}

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
		sge_no++;
	}

	dprintk("svcrdma: %s: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		__func__, sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

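/* DMA-map the page backing byte offset "xdr_off" of the xdr_buf,
 * which may land in the head, the pagelist, or the tail. The caller
 * must check the returned address with ib_dma_mapping_error().
 */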
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;
	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}

/* Returns the address of the first read chunk or <nul> if no read chunk
 * is present
 */
struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *ch =
		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];

	if (ch->rc_discrim == xdr_zero)
		return NULL;
	return ch;
}

/* Returns the address of the first write array element or <nul>
 * if no write array list is present
 */
static struct rpcrdma_write_array *
svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
{
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] == xdr_zero)
		return NULL;
	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
}

/* Returns the address of the first reply array element or <nul> if no
 * reply array is present
 */
static struct rpcrdma_write_array *
svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp,
			 struct rpcrdma_write_array *wr_ary)
{
	struct rpcrdma_read_chunk *rch;
	struct rpcrdma_write_array *rp_ary;

	/* XXX: Need to fix when reply chunk may occur with read list
	 * and/or write list.
	 */
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] != xdr_zero)
		return NULL;

	rch = svc_rdma_get_read_chunk(rmsgp);
	if (rch) {
		while (rch->rc_discrim != xdr_zero)
			rch++;

		/* The reply chunk follows an empty write array located
		 * at 'rc_position' here. The reply array is at rc_target.
		 */
		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
		goto found_it;
	}

	if (wr_ary) {
		int chunk = be32_to_cpu(wr_ary->wc_nchunks);

		rp_ary = (struct rpcrdma_write_array *)
			 &wr_ary->wc_array[chunk].wc_target.rs_length;
		goto found_it;
	}

	/* No read list, no write list */
	rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];

 found_it:
	if (rp_ary->wc_discrim == xdr_zero)
		return NULL;
	return rp_ary;
}

/* Post a single RDMA Write that conveys "write_len" bytes of rq_res,
 * starting at XDR offset "xdr_off", to the client memory region
 * described by "rmr" and "to". Returns the number of bytes actually
 * posted, or -EIO on error.
 *
 * Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_rdma_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	if (vec->count > RPCSVC_MAXPAGES) {
		pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
		return -EIO;
	}

	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
				  bc, vec->sge[xdr_sge_no].iov_len-sge_off);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].addr =
			dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 sge[sge_no].addr))
			goto err;
		atomic_inc(&xprt->sc_dma_used);
		sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->count++;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		if (xdr_sge_no > vec->count) {
			pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
			goto err;
		}
		bc -= sge_bytes;
		if (sge_no == xprt->sc_max_sge)
			break;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr.wr_id = (unsigned long)ctxt;
	write_wr.wr.sg_list = &sge[0];
	write_wr.wr.num_sge = sge_no;
	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.wr.send_flags = IB_SEND_SIGNALED;
	write_wr.rkey = rmr;
	write_wr.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr.wr))
		goto err;
	return write_len - bc;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	/* Fatal error, close transport */
	return -EIO;
}

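/* RDMA-Write the response pagelist into the write chunks the client
 * provided, and encode the corresponding write list in the RPC/RDMA
 * reply header. Returns the number of pagelist bytes conveyed, or
 * -EIO on error.
 */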
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *wr_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len;
	int write_len;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	int nchunks;
	struct rpcrdma_write_array *res_ary;
	int ret;

	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	/* Write chunks start at the pagelist */
	nchunks = be32_to_cpu(wr_ary->wc_nchunks);
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &wr_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len;

out_err:
	pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret);
	return -EIO;
}

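/* RDMA-Write the RPC reply into the client-provided reply chunk, and
 * encode the corresponding reply array in the RPC/RDMA reply header.
 * Returns the total number of reply bytes conveyed, or -EIO on error.
 */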
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *rp_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *res_ary;
	int ret;

	/* XXX: need to fix when reply lists occur with read-list and or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	/* xdr offset starts at RPC message */
	nchunks = be32_to_cpu(rp_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &rp_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;

out_err:
	pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret);
	return -EIO;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND. NB: The offset of the payload
 * to send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      struct svc_rdma_req_map *vec,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	u32 xdr_off;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret;

	/* Post a recv buffer to handle another request. */
	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
	if (ret) {
		printk(KERN_INFO
		       "svcrdma: could not post a receive buffer, err=%d. "
		       "Closing transport %p.\n", ret, rdma);
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_put_context(ctxt, 0);
		return -ENOTCONN;
	}

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
			    ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	atomic_inc(&rdma->sc_dma_used);

	ctxt->direction = DMA_TO_DEVICE;

	/* Map the payload indicated by 'byte_count' */
	xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr =
			dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[sge_no].addr))
			goto err;
		atomic_inc(&rdma->sc_dma_used);
		ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
		ctxt->sge[sge_no].length = sge_bytes;
	}
	if (byte_count != 0) {
		pr_err("svcrdma: Could not map %d bytes\n", byte_count);
		goto err;
	}

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	pages = rqstp->rq_next_page - rqstp->rq_respages;
	for (page_no = 0; page_no < pages; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
		/*
		 * If there are more pages than SGE, terminate SGE
		 * list so that svc_rdma_unmap_dma doesn't attempt to
		 * unmap garbage.
		 */
		if (page_no+1 >= sge_no)
			ctxt->sge[page_no+1].length = 0;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* The loop above bumps sc_dma_used for each sge. The
	 * xdr_buf.tail gets a separate sge, but resides in the
	 * same page as xdr_buf.head. Don't count it twice.
	 */
	if (sge_no > ctxt->count)
		atomic_dec(&rdma->sc_dma_used);

	if (sge_no > rdma->sc_max_sge) {
		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
		goto err;
	}
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -EIO;
}

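/* Nothing to do here. The RPC/RDMA reply header is built later, in
 * svc_rdma_sendto(), after the chunk data has been transmitted.
 */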
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

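/* Send the response to a completed RPC: RDMA-Write the payload into
 * any write or reply chunks the client provided, then RDMA-Send the
 * RPC/RDMA reply header along with whatever payload remains inline.
 */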
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *wr_ary, *rp_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. The receive logic always
	 * places this at the start of page 0.
	 */
	rdma_argp = page_address(rqstp->rq_pages[0]);
	wr_ary = svc_rdma_get_write_array(rdma_argp);
	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);

	/* Build a req vec for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	ret = -ENOMEM;
	res_page = alloc_page(GFP_KERNEL);
	if (!res_page)
		goto err0;
	rdma_resp = page_address(res_page);
	if (rp_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	if (wr_ary) {
		ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret + xdr_padsize(ret);
	}

	/* Send any reply-list data and update resp reply-list */
	if (rp_ary) {
		ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret;
	}

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(rdma, vec);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}