/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

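/* svc_rdma_map_xdr - build an iovec array describing an xdr_buf
 * @xprt: transport the reply will be sent on
 * @xdr: xdr_buf holding the RPC reply (head, page list, tail)
 * @vec: req_map to fill in
 *
 * Walks the three regions of @xdr and records a kvec for each
 * contiguous piece. vec->sge[0] is left empty; it is reserved for the
 * RPC/RDMA header, which the caller maps separately. For example, a
 * reply with a 100-byte head, two full pages of data, and a 4-byte
 * tail yields vec->count == 5: the reserved header slot, the head,
 * one kvec per page, and the tail.
 *
 * Returns 0 on success, or -EIO if the xdr_buf's component lengths
 * are inconsistent with xdr->len.
 */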
int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
		     struct xdr_buf *xdr,
		     struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	if (xdr->len !=
	    (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
		pr_err("svcrdma: %s: XDR buffer length error\n", __func__);
		return -EIO;
	}

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
		sge_no++;
	}

	dprintk("svcrdma: %s: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		__func__, sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

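/* dma_map_xdr - DMA-map part of an xdr_buf for RDMA
 * @xprt: transport whose device the mapping is for
 * @xdr: xdr_buf holding the data to be transmitted
 * @xdr_off: logical byte offset into @xdr
 * @len: number of bytes requested
 * @dir: DMA direction
 *
 * Translates a logical offset in the xdr_buf into the backing page
 * (head, page list, or tail), then maps at most one page starting at
 * that offset; a longer region requires repeated calls. Callers must
 * check the result with ib_dma_mapping_error().
 */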
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;
	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}

/* Returns the address of the first read chunk or <nul> if no read chunk
 * is present
 */
struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *ch =
		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];

	if (ch->rc_discrim == xdr_zero)
		return NULL;
	return ch;
}

/* Returns the address of the first write array element or <nul>
 * if no write array list is present
 */
static struct rpcrdma_write_array *
svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
{
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] == xdr_zero)
		return NULL;
	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
}

/* Returns the address of the first reply array element or <nul> if no
 * reply array is present
 */
static struct rpcrdma_write_array *
svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp,
			 struct rpcrdma_write_array *wr_ary)
{
	struct rpcrdma_read_chunk *rch;
	struct rpcrdma_write_array *rp_ary;

	/* XXX: Need to fix when reply chunk may occur with read list
	 * and/or write list.
	 */
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] != xdr_zero)
		return NULL;

	rch = svc_rdma_get_read_chunk(rmsgp);
	if (rch) {
		while (rch->rc_discrim != xdr_zero)
			rch++;

		/* The reply chunk follows an empty write array located
		 * at 'rc_position' here. The reply array is at rc_target.
		 */
		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
		goto found_it;
	}

	if (wr_ary) {
		int chunk = be32_to_cpu(wr_ary->wc_nchunks);

		rp_ary = (struct rpcrdma_write_array *)
			 &wr_ary->wc_array[chunk].wc_target.rs_length;
		goto found_it;
	}

	/* No read list, no write list */
	rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];

 found_it:
	if (rp_ary->wc_discrim == xdr_zero)
		return NULL;
	return rp_ary;
}

/* RDMA Write a portion of the xdr_buf to the remote memory region
 * described by @rmr and @to.
 *
 * Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 *
 * Returns the number of bytes actually written, or -EIO on a fatal
 * error that requires closing the transport.
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_rdma_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	if (vec->count > RPCSVC_MAXPAGES) {
		pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
		return -EIO;
	}

	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
				  bc, vec->sge[xdr_sge_no].iov_len-sge_off);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].addr =
			dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 sge[sge_no].addr))
			goto err;
		atomic_inc(&xprt->sc_dma_used);
		sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->count++;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		if (xdr_sge_no > vec->count) {
			pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
			goto err;
		}
		bc -= sge_bytes;
		if (sge_no == xprt->sc_max_sge)
			break;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr.wr_id = (unsigned long)ctxt;
	write_wr.wr.sg_list = &sge[0];
	write_wr.wr.num_sge = sge_no;
	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.wr.send_flags = IB_SEND_SIGNALED;
	write_wr.rkey = rmr;
	write_wr.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr.wr))
		goto err;
	return write_len - bc;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	/* Fatal error, close transport */
	return -EIO;
}

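/* send_write_chunks - push the reply's pagelist via RDMA Write
 *
 * Walks the client-provided write array @wr_ary and, for each chunk,
 * RDMA Writes as much of the reply's page list (and tail) as fits in
 * that chunk, then encodes the length actually written into the
 * response's write list in @rdma_resp.
 *
 * Returns the number of bytes consumed from the xdr_buf, or -EIO if
 * a Write could not be posted.
 */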
noinline
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *wr_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
	int write_len;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	int nchunks;
	struct rpcrdma_write_array *res_ary;
	int ret;

	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	/* Write chunks start at the pagelist */
	nchunks = be32_to_cpu(wr_ary->wc_nchunks);
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &wr_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;

out_err:
	pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret);
	return -EIO;
}

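/* send_reply_chunks - push the whole RPC reply via RDMA Write
 *
 * Like send_write_chunks(), but transfers the entire reply message,
 * starting at XDR offset zero, into the client-provided reply array
 * @rp_ary, and encodes the result as the response's reply chunk.
 * Used when the reply is sent as RDMA_NOMSG.
 *
 * Returns the number of reply bytes transferred, or -EIO if a Write
 * could not be posted.
 */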
noinline
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *rp_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *res_ary;
	int ret;

	/* XXX: need to fix when reply lists occur with read-list
	 * and/or write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	/* xdr offset starts at RPC message */
	nchunks = be32_to_cpu(rp_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &rp_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;

out_err:
	pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret);
	return -EIO;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND. NB: The offset of the payload
 * to send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      struct svc_rdma_req_map *vec,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	u32 xdr_off;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret;

	/* Post a recv buffer to handle another request. */
	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
	if (ret) {
		printk(KERN_INFO
		       "svcrdma: could not post a receive buffer, err=%d. "
		       "Closing transport %p.\n", ret, rdma);
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_put_context(ctxt, 0);
		return -ENOTCONN;
	}

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
			    ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	atomic_inc(&rdma->sc_dma_used);

	ctxt->direction = DMA_TO_DEVICE;

	/* Map the payload indicated by 'byte_count' */
	xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr =
			dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[sge_no].addr))
			goto err;
		atomic_inc(&rdma->sc_dma_used);
		ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
		ctxt->sge[sge_no].length = sge_bytes;
	}
	if (byte_count != 0) {
		pr_err("svcrdma: Could not map %d bytes\n", byte_count);
		goto err;
	}

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	pages = rqstp->rq_next_page - rqstp->rq_respages;
	for (page_no = 0; page_no < pages; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
		/*
		 * If there are more pages than SGE, terminate SGE
		 * list so that svc_rdma_unmap_dma doesn't attempt to
		 * unmap garbage.
		 */
		if (page_no+1 >= sge_no)
			ctxt->sge[page_no+1].length = 0;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* The loop above bumps sc_dma_used for each sge. The
	 * xdr_buf.tail gets a separate sge, but resides in the
	 * same page as xdr_buf.head. Don't count it twice.
	 */
	if (sge_no > ctxt->count)
		atomic_dec(&rdma->sc_dma_used);

	if (sge_no > rdma->sc_max_sge) {
		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
		goto err;
	}
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -EIO;
}

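/* svc_rdma_prep_reply_hdr - transport hook for reserving reply
 * header space. A no-op for RPC-over-RDMA: the reply header is
 * built in a separate page by svc_rdma_sendto(), so nothing needs
 * to be reserved in the xdr_buf here.
 */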
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

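/* svc_rdma_sendto - transmit an RPC reply on an RPC-over-RDMA
 * transport
 *
 * The main reply path. In outline:
 *
 *   1. Locate the client-provided write and reply chunk arrays in
 *      the request header, which the receive path saved at the
 *      start of rq_pages[0].
 *   2. Map the reply xdr_buf with svc_rdma_map_xdr().
 *   3. Build the RPC/RDMA reply header in a freshly allocated page:
 *      RDMA_NOMSG if a reply chunk is used, RDMA_MSG otherwise.
 *   4. RDMA Write any write-chunk and reply-chunk payload.
 *   5. Post an RDMA Send carrying the header and whatever remains
 *      inline, via send_reply().
 *
 * Returns 0 on success or a negative errno; on error the context
 * and request map are released here.
 */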
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *wr_ary, *rp_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. The receive logic always
	 * places this at the start of page 0.
	 */
	rdma_argp = page_address(rqstp->rq_pages[0]);
	wr_ary = svc_rdma_get_write_array(rdma_argp);
	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);

	/* Build a req vec for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	ret = -ENOMEM;
	res_page = alloc_page(GFP_KERNEL);
	if (!res_page)
		goto err0;
	rdma_resp = page_address(res_page);
	if (rp_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	if (wr_ary) {
		ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret;
	}

	/* Send any reply-list data and update resp reply-list */
	if (rp_ary) {
		ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret;
	}

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(rdma, vec);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}