/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
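
/*
 * Server-side reply transmission for RPC/RDMA. svc_rdma_sendto() maps
 * the reply xdr_buf into an SGE vector, pushes write-chunk and
 * reply-chunk data to the client with RDMA_WRITE, and then transmits
 * the RPCRDMA header (plus any inline data) with a single RDMA_SEND.
 */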

/* Encode an XDR as an array of IB SGE
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * SGE[0]              reserved for RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail.
 *
 * The max SGE we need is the length of the XDR / pagesize + one for
 * head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES
 * reserves a page for both the request and the reply header, and this
 * array is only concerned with the reply, we are assured that we have
 * one extra page for the RPCRDMA header.
 */
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
			struct xdr_buf *xdr,
			struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no = 0;
	u8 *frva;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = svc_rdma_get_frmr(xprt);
	if (IS_ERR(frmr))
		return -ENOMEM;
	vec->frmr = frmr;

	/* Skip the RPCRDMA header */
	sge_no = 1;

	/* Map the head. */
	frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	vec->count = 2;
	sge_no++;

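	/*
	 * The FRMR (fast-register memory region) maps the head, the
	 * pagelist and the tail into one virtually-contiguous region
	 * rooted at the page-aligned address frva, so a single lkey
	 * covers the whole reply.
	 */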
	/* Build the FRMR */
	frmr->kva = frva;
	frmr->direction = DMA_TO_DEVICE;
	frmr->access_flags = 0;
	frmr->map_len = PAGE_SIZE;
	frmr->page_list_len = 1;
	frmr->page_list->page_list[page_no] =
		ib_dma_map_single(xprt->sc_cm_id->device,
				  (void *)xdr->head[0].iov_base,
				  PAGE_SIZE, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device,
				 frmr->page_list->page_list[page_no]))
		goto fatal_err;
	atomic_inc(&xprt->sc_dma_used);

	page_off = xdr->page_base;
	page_bytes = xdr->page_len + page_off;
	if (!page_bytes)
		goto encode_tail;

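	/*
	 * Thanks to the FRMR, the discontiguous pages appear virtually
	 * contiguous at frva + map_len, so the whole pagelist needs
	 * only one iovec entry.
	 */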
	/* Map the pages */
	vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
	vec->sge[sge_no].iov_len = page_bytes;
	sge_no++;
	while (page_bytes) {
		struct page *page;

		page = xdr->pages[page_no++];
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;

		frmr->page_list->page_list[page_no] =
			ib_dma_map_single(xprt->sc_cm_id->device,
					  page_address(page),
					  PAGE_SIZE, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;

		atomic_inc(&xprt->sc_dma_used);
		page_off = 0; /* reset for next time through loop */
		frmr->map_len += PAGE_SIZE;
		frmr->page_list_len++;
	}
	vec->count++;

 encode_tail:
	/* Map tail */
	if (0 == xdr->tail[0].iov_len)
		goto done;

	vec->count++;
	vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;

	if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
	    ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
		/*
		 * If head and tail use the same page, we don't need
		 * to map it again.
		 */
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
	} else {
		void *va;

		/* Map another page for the tail */
		page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
		va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
		vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;

		frmr->page_list->page_list[page_no] =
			ib_dma_map_single(xprt->sc_cm_id->device, va, PAGE_SIZE,
					  DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;
		atomic_inc(&xprt->sc_dma_used);
		frmr->map_len += PAGE_SIZE;
		frmr->page_list_len++;
	}

 done:
	if (svc_rdma_fastreg(xprt, frmr))
		goto fatal_err;

	return 0;

 fatal_err:
	printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
	vec->frmr = NULL;
	svc_rdma_put_frmr(xprt, frmr);
	return -EIO;
}

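/* Build a vector of iovecs describing the reply: entry 0 is reserved
 * for the RPCRDMA header, then one entry each for the head, every
 * pagelist page, and the tail. When the device supports fast memory
 * registration the work is delegated to fast_reg_xdr() instead.
 */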
static int map_xdr(struct svcxprt_rdma *xprt,
		   struct xdr_buf *xdr,
		   struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	BUG_ON(xdr->len !=
	       (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

	if (xprt->sc_frmr_pg_list_len)
		return fast_reg_xdr(xprt, xdr, vec);

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
		sge_no++;
	}

	dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

/* Assumptions:
 * - We are using FRMR
 *     - or -
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_send_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	BUG_ON(vec->count > RPCSVC_MAXPAGES);
	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

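	/* bc now holds the offset of xdr_off within vec->sge[xdr_sge_no] */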
	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
			  bc, vec->sge[xdr_sge_no].iov_len-sge_off);
		sge[sge_no].length = sge_bytes;
		if (!vec->frmr) {
			sge[sge_no].addr =
				ib_dma_map_single(xprt->sc_cm_id->device,
						  (void *)
						  vec->sge[xdr_sge_no].iov_base + sge_off,
						  sge_bytes, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(xprt->sc_cm_id->device,
						 sge[sge_no].addr))
				goto err;
			atomic_inc(&xprt->sc_dma_used);
			sge[sge_no].lkey = xprt->sc_dma_lkey;
		} else {
			sge[sge_no].addr = (unsigned long)
				vec->sge[xdr_sge_no].iov_base + sge_off;
			sge[sge_no].lkey = vec->frmr->mr->lkey;
		}
		ctxt->count++;
		ctxt->frmr = vec->frmr;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		BUG_ON(xdr_sge_no > vec->count);
		bc -= sge_bytes;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr_id = (unsigned long)ctxt;
	write_wr.sg_list = &sge[0];
	write_wr.num_sge = sge_no;
	write_wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.send_flags = IB_SEND_SIGNALED;
	write_wr.wr.rdma.rkey = rmr;
	write_wr.wr.rdma.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr))
		goto err;
	return 0;
 err:
	svc_rdma_put_context(ctxt, 0);
	/* Fatal error, close transport */
	return -EIO;
}

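/* A write chunk in the request points at client memory into which
 * bulk reply data (the pagelist, e.g. READ payload) is RDMA-written.
 * Walk the client-provided write array, push the data with
 * send_write() in max_write-sized pieces, and record the length
 * actually written in the matching entry of the response's write
 * list.
 */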
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_write_array(rdma_argp);
	if (!arg_ary)
		return 0;
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	if (vec->frmr)
		max_write = vec->frmr->map_len;
	else
		max_write = xprt->sc_max_sge * PAGE_SIZE;

	/* Write chunks start at the pagelist */
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, arg_ch->rs_length);

		/* Prepare the response chunk given the length actually
		 * written */
		rs_offset = get_unaligned(&(arg_ch->rs_offset));
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			int this_write;
			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 arg_ch->rs_handle,
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 vec);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

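/* A reply array is like a write array, but covers the entire RPC
 * reply rather than just the bulk payload; when the client supplies
 * one, the whole reply is RDMA-written and the RDMA_SEND carries an
 * RDMA_NOMSG header only.
 */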
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_reply_array(rdma_argp);
	if (!arg_ary)
		return 0;
	/* XXX: need to fix when reply lists occur with read-list and/or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	if (vec->frmr)
		max_write = vec->frmr->map_len;
	else
		max_write = xprt->sc_max_sge * PAGE_SIZE;

	/* xdr offset starts at RPC message */
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ch->rs_length);

		/* Prepare the reply chunk given the length actually
		 * written */
		rs_offset = get_unaligned(&(ch->rs_offset));
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			int this_write;

			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 ch->rs_handle,
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 vec);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0]; the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      struct svc_rdma_req_map *vec,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	struct ib_send_wr inv_wr;
	int sge_no;
	int sge_bytes;
	int page_no;
	int ret;

	/* Post a recv buffer to handle another request. */
	ret = svc_rdma_post_recv(rdma);
	if (ret) {
		printk(KERN_INFO
		       "svcrdma: could not post a receive buffer, err=%d. "
		       "Closing transport %p.\n", ret, rdma);
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_put_frmr(rdma, vec->frmr);
		svc_rdma_put_context(ctxt, 0);
		return -ENOTCONN;
	}

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;
	ctxt->frmr = vec->frmr;
	if (vec->frmr)
		set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
		ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
				  ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	atomic_inc(&rdma->sc_dma_used);

	ctxt->direction = DMA_TO_DEVICE;

	/* Determine how many of our SGE are to be transmitted */
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		if (!vec->frmr) {
			ctxt->sge[sge_no].addr =
				ib_dma_map_single(rdma->sc_cm_id->device,
						  vec->sge[sge_no].iov_base,
						  sge_bytes, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(rdma->sc_cm_id->device,
						 ctxt->sge[sge_no].addr))
				goto err;
			atomic_inc(&rdma->sc_dma_used);
			ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
		} else {
			ctxt->sge[sge_no].addr = (unsigned long)
				vec->sge[sge_no].iov_base;
			ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
		}
		ctxt->sge[sge_no].length = sge_bytes;
	}
	BUG_ON(byte_count != 0);

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
		/*
		 * If there are more pages than SGE, terminate SGE
		 * list so that svc_rdma_unmap_dma doesn't attempt to
		 * unmap garbage.
		 */
		if (page_no+1 >= sge_no)
			ctxt->sge[page_no+1].length = 0;
	}
	BUG_ON(sge_no > rdma->sc_max_sge);
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;
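	/* If an FRMR was used, chain a LOCAL_INV WR behind the SEND so
	 * that the registration is invalidated once the reply has been
	 * posted.
	 */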
	if (vec->frmr) {
		/* Prepare INVALIDATE WR */
		memset(&inv_wr, 0, sizeof inv_wr);
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.send_flags = IB_SEND_SIGNALED;
		inv_wr.ex.invalidate_rkey =
			vec->frmr->mr->lkey;
		send_wr.next = &inv_wr;
	}

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_frmr(rdma, vec->frmr);
	svc_rdma_put_context(ctxt, 1);
	return -EIO;
}

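/*
 * The xpo_prep_reply_hdr method is a no-op for RDMA: the transport
 * header is built later, in svc_rdma_sendto(), rather than being
 * reserved ahead of the RPC reply as stream transports do.
 */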
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the start of an xdr buffer: xdr->len still counts the
 * RPCRDMA transport header, while the head/page/tail iovecs describe
 * only the RPC message, so backing head[0].iov_base up by the
 * difference recovers the start of the receive buffer.
 */
static void *xdr_start(struct xdr_buf *xdr)
{
	return xdr->head[0].iov_base -
		(xdr->len -
		 xdr->page_len -
		 xdr->tail[0].iov_len -
		 xdr->head[0].iov_len);
}

int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *reply_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. */
	rdma_argp = xdr_start(&rqstp->rq_arg);

	/* Build a req vec for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	vec = svc_rdma_get_req_map();
	ret = map_xdr(rdma, &rqstp->rq_res, vec);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	res_page = svc_rdma_get_page();
	rdma_resp = page_address(res_page);
	reply_ary = svc_rdma_get_reply_array(rdma_argp);
	if (reply_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
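	/* A client-supplied reply array means the entire reply moves
	 * via RDMA and the SEND carries a header-only RDMA_NOMSG;
	 * otherwise the RPC reply itself travels inline in an RDMA_MSG
	 * (bulk data may still go via the write list).
	 */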
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	/* Send any reply-list data and update resp reply-list */
	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
	svc_rdma_put_req_map(vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(vec);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}