/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT
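
/* Receive-path overview (an informal summary of this file, not
 * normative documentation):
 *
 *   1. svc_rdma_recvfrom() dequeues a completed RDMA_RECV context
 *      from the transport's dto queue.
 *   2. rdma_build_arg_xdr() points rq_arg at the pages that arrived
 *      in that RECV.
 *   3. If the RPC-over-RDMA header carries a read list,
 *      rdma_read_chunks() pulls the chunk data from the client with
 *      RDMA_READs (via xprt->sc_reader, one of the two
 *      rdma_read_chunk_* methods below) and defers the request.
 *   4. When those reads complete, the request is requeued on
 *      sc_read_complete_q and finished by rdma_read_complete().
 */
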
/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
                               struct svc_rdma_op_ctxt *ctxt,
                               u32 byte_count)
{
        struct rpcrdma_msg *rmsgp;
        struct page *page;
        u32 bc;
        int sge_no;

        /* Swap the page in the SGE with the page in argpages */
        page = ctxt->pages[0];
        put_page(rqstp->rq_pages[0]);
        rqstp->rq_pages[0] = page;

        /* Set up the XDR head */
        rqstp->rq_arg.head[0].iov_base = page_address(page);
        rqstp->rq_arg.head[0].iov_len =
                min_t(size_t, byte_count, ctxt->sge[0].length);
        rqstp->rq_arg.len = byte_count;
        rqstp->rq_arg.buflen = byte_count;

        /* Compute bytes past head in the SGL */
        bc = byte_count - rqstp->rq_arg.head[0].iov_len;

        /* If data remains, store it in the pagelist */
        rqstp->rq_arg.page_len = bc;
        rqstp->rq_arg.page_base = 0;

        /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
        rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
        if (rmsgp->rm_type == rdma_nomsg)
                rqstp->rq_arg.pages = &rqstp->rq_pages[0];
        else
                rqstp->rq_arg.pages = &rqstp->rq_pages[1];

        sge_no = 1;
        while (bc && sge_no < ctxt->count) {
                page = ctxt->pages[sge_no];
                put_page(rqstp->rq_pages[sge_no]);
                rqstp->rq_pages[sge_no] = page;
                bc -= min_t(u32, bc, ctxt->sge[sge_no].length);
                rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
                sge_no++;
        }
        rqstp->rq_respages = &rqstp->rq_pages[sge_no];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        /* If not all pages were used from the SGL, free the remaining ones */
        bc = sge_no;
        while (sge_no < ctxt->count) {
                page = ctxt->pages[sge_no++];
                put_page(page);
        }
        ctxt->count = bc;

        /* Set up tail */
        rqstp->rq_arg.tail[0].iov_base = NULL;
        rqstp->rq_arg.tail[0].iov_len = 0;
}
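
/* Worked example for rdma_build_arg_xdr() (illustrative numbers only,
 * assuming 4096-byte pages and a single-page first SGE): a RECV
 * completes with byte_count = 5000. head[0].iov_len becomes
 * min(5000, 4096) = 4096, leaving bc = 904 bytes that are exposed
 * through rq_arg.pages[] by the page-swap loop above. Any SGL pages
 * beyond those 904 bytes are released and ctxt->count is trimmed to
 * match.
 */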

/* Issue an RDMA_READ using the local lkey to map the data sink */
int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
                        struct svc_rqst *rqstp,
                        struct svc_rdma_op_ctxt *head,
                        int *page_no,
                        u32 *page_offset,
                        u32 rs_handle,
                        u32 rs_length,
                        u64 rs_offset,
                        bool last)
{
        struct ib_rdma_wr read_wr;
        int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
        struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
        int ret, read, pno;
        u32 pg_off = *page_offset;
        u32 pg_no = *page_no;

        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->read_hdr = head;
        pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
        read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);

        for (pno = 0; pno < pages_needed; pno++) {
                int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

                head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
                head->arg.page_len += len;
                head->arg.len += len;
                if (!pg_off)
                        head->count++;
                rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
                rqstp->rq_next_page = rqstp->rq_respages + 1;
                ctxt->sge[pno].addr =
                        ib_dma_map_page(xprt->sc_cm_id->device,
                                        head->arg.pages[pg_no], pg_off,
                                        PAGE_SIZE - pg_off,
                                        DMA_FROM_DEVICE);
                ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
                                           ctxt->sge[pno].addr);
                if (ret)
                        goto err;
                atomic_inc(&xprt->sc_dma_used);

                /* The lkey here is either a local dma lkey or a dma_mr lkey */
                ctxt->sge[pno].lkey = xprt->sc_dma_lkey;
                ctxt->sge[pno].length = len;
                ctxt->count++;

                /* adjust offset and wrap to next page if needed */
                pg_off += len;
                if (pg_off == PAGE_SIZE) {
                        pg_off = 0;
                        pg_no++;
                }
                rs_length -= len;
        }

        if (last && rs_length == 0)
                set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
        else
                clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

        memset(&read_wr, 0, sizeof(read_wr));
        read_wr.wr.wr_id = (unsigned long)ctxt;
        read_wr.wr.opcode = IB_WR_RDMA_READ;
        ctxt->wr_op = read_wr.wr.opcode;
        read_wr.wr.send_flags = IB_SEND_SIGNALED;
        read_wr.rkey = rs_handle;
        read_wr.remote_addr = rs_offset;
        read_wr.wr.sg_list = ctxt->sge;
        read_wr.wr.num_sge = pages_needed;

        ret = svc_rdma_send(xprt, &read_wr.wr);
        if (ret) {
                pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                goto err;
        }

        /* return current location in page array */
        *page_no = pg_no;
        *page_offset = pg_off;
        ret = read;
        atomic_inc(&rdma_stat_read);
        return ret;
 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 0);
        return ret;
}
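
/* Sizing sketch for rdma_read_chunk_lcl() (hypothetical values,
 * assuming PAGE_SIZE = 4096): with *page_offset = 512 and
 * rs_length = 8192, PAGE_ALIGN(512 + 8192) >> PAGE_SHIFT = 3 pages.
 * If sc_max_sge_rd caps this below 3, only part of the chunk is
 * mapped and read; the caller (rdma_read_chunks) loops, advancing
 * rs_offset by the returned byte count until the chunk is drained.
 */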

/* Issue an RDMA_READ using an FRMR to map the data sink */
int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
                         struct svc_rqst *rqstp,
                         struct svc_rdma_op_ctxt *head,
                         int *page_no,
                         u32 *page_offset,
                         u32 rs_handle,
                         u32 rs_length,
                         u64 rs_offset,
                         bool last)
{
        struct ib_rdma_wr read_wr;
        struct ib_send_wr inv_wr;
        struct ib_reg_wr reg_wr;
        u8 key;
        int nents = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
        struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
        struct svc_rdma_fastreg_mr *frmr = svc_rdma_get_frmr(xprt);
        int ret, read, pno, dma_nents, n;
        u32 pg_off = *page_offset;
        u32 pg_no = *page_no;

        if (IS_ERR(frmr))
                return -ENOMEM;

        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->frmr = frmr;
        nents = min_t(unsigned int, nents, xprt->sc_frmr_pg_list_len);
        read = min_t(int, nents << PAGE_SHIFT, rs_length);

        frmr->direction = DMA_FROM_DEVICE;
        frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
        frmr->sg_nents = nents;

        for (pno = 0; pno < nents; pno++) {
                int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

                head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
                head->arg.page_len += len;
                head->arg.len += len;
                if (!pg_off)
                        head->count++;

                sg_set_page(&frmr->sg[pno], rqstp->rq_arg.pages[pg_no],
                            len, pg_off);

                rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
                rqstp->rq_next_page = rqstp->rq_respages + 1;

                /* adjust offset and wrap to next page if needed */
                pg_off += len;
                if (pg_off == PAGE_SIZE) {
                        pg_off = 0;
                        pg_no++;
                }
                rs_length -= len;
        }

        if (last && rs_length == 0)
                set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
        else
                clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

        dma_nents = ib_dma_map_sg(xprt->sc_cm_id->device,
                                  frmr->sg, frmr->sg_nents,
                                  frmr->direction);
        if (!dma_nents) {
                pr_err("svcrdma: failed to dma map sg %p\n",
                       frmr->sg);
                return -ENOMEM;
        }
        atomic_inc(&xprt->sc_dma_used);

        n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
        if (unlikely(n != frmr->sg_nents)) {
                pr_err("svcrdma: failed to map mr %p (%d/%d elements)\n",
                       frmr->mr, n, frmr->sg_nents);
                return n < 0 ? n : -EINVAL;
        }

        /* Bump the key */
        key = (u8)(frmr->mr->lkey & 0x000000FF);
        ib_update_fast_reg_key(frmr->mr, ++key);

        ctxt->sge[0].addr = frmr->mr->iova;
        ctxt->sge[0].lkey = frmr->mr->lkey;
        ctxt->sge[0].length = frmr->mr->length;
        ctxt->count = 1;
        ctxt->read_hdr = head;

        /* Prepare REG WR */
        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.wr.wr_id = 0;
        reg_wr.wr.send_flags = IB_SEND_SIGNALED;
        reg_wr.wr.num_sge = 0;
        reg_wr.mr = frmr->mr;
        reg_wr.key = frmr->mr->lkey;
        reg_wr.access = frmr->access_flags;
        reg_wr.wr.next = &read_wr.wr;

        /* Prepare RDMA_READ */
        memset(&read_wr, 0, sizeof(read_wr));
        read_wr.wr.send_flags = IB_SEND_SIGNALED;
        read_wr.rkey = rs_handle;
        read_wr.remote_addr = rs_offset;
        read_wr.wr.sg_list = ctxt->sge;
        read_wr.wr.num_sge = 1;
        if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
                read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
                read_wr.wr.wr_id = (unsigned long)ctxt;
                read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
        } else {
                read_wr.wr.opcode = IB_WR_RDMA_READ;
                read_wr.wr.next = &inv_wr;
                /* Prepare invalidate */
                memset(&inv_wr, 0, sizeof(inv_wr));
                inv_wr.wr_id = (unsigned long)ctxt;
                inv_wr.opcode = IB_WR_LOCAL_INV;
                inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
                inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
        }
        ctxt->wr_op = read_wr.wr.opcode;

        /* Post the chain */
        ret = svc_rdma_send(xprt, &reg_wr.wr);
        if (ret) {
                pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                goto err;
        }

        /* return current location in page array */
        *page_no = pg_no;
        *page_offset = pg_off;
        ret = read;
        atomic_inc(&rdma_stat_read);
        return ret;
 err:
        ib_dma_unmap_sg(xprt->sc_cm_id->device,
                        frmr->sg, frmr->sg_nents, frmr->direction);
        svc_rdma_put_context(ctxt, 0);
        svc_rdma_put_frmr(xprt, frmr);
        return ret;
}
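
/* Note on the work request chain built above: the FRMR path posts
 * REG_MR -> RDMA_READ and, when the device lacks read-with-invalidate
 * support, a trailing LOCAL_INV (with IB_SEND_FENCE) so the MR is
 * invalidated only after the read has completed. With
 * SVCRDMA_DEVCAP_READ_W_INV the invalidation rides on the
 * RDMA_READ_WITH_INV opcode instead and no third WR is needed.
 */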

static unsigned int
rdma_rcl_chunk_count(struct rpcrdma_read_chunk *ch)
{
        unsigned int count;

        for (count = 0; ch->rc_discrim != xdr_zero; ch++)
                count++;
        return count;
}
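
/* Rough on-the-wire shape of each read-list entry counted above and
 * decoded in rdma_read_chunks() below (see struct rpcrdma_read_chunk
 * in rpc_rdma.h; this summary is a reading aid, not a spec):
 *
 *   rc_discrim          - non-zero while entries remain, xdr_zero ends
 *   rc_position         - XDR offset where the chunk data belongs
 *   rc_target.rs_handle - rkey for the client's memory region
 *   rc_target.rs_length - chunk length in bytes
 *   rc_target.rs_offset - 64-bit remote address, decoded with
 *                         xdr_decode_hyper()
 */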
364
Chuck Levera97c3312015-01-13 11:03:53 -0500365/* If there was additional inline content, append it to the end of arg.pages.
366 * Tail copy has to be done after the reader function has determined how many
367 * pages are needed for RDMA READ.
368 */
369static int
370rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
371 u32 position, u32 byte_count, u32 page_offset, int page_no)
372{
373 char *srcp, *destp;
374 int ret;
375
376 ret = 0;
377 srcp = head->arg.head[0].iov_base + position;
378 byte_count = head->arg.head[0].iov_len - position;
379 if (byte_count > PAGE_SIZE) {
380 dprintk("svcrdma: large tail unsupported\n");
381 return 0;
382 }
383
384 /* Fit as much of the tail on the current page as possible */
385 if (page_offset != PAGE_SIZE) {
386 destp = page_address(rqstp->rq_arg.pages[page_no]);
387 destp += page_offset;
388 while (byte_count--) {
389 *destp++ = *srcp++;
390 page_offset++;
391 if (page_offset == PAGE_SIZE && byte_count)
392 goto more;
393 }
394 goto done;
395 }
396
397more:
398 /* Fit the rest on the next page */
399 page_no++;
400 destp = page_address(rqstp->rq_arg.pages[page_no]);
401 while (byte_count--)
402 *destp++ = *srcp++;
403
404 rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
405 rqstp->rq_next_page = rqstp->rq_respages + 1;
406
407done:
408 byte_count = head->arg.head[0].iov_len - position;
409 head->arg.page_len += byte_count;
410 head->arg.len += byte_count;
411 head->arg.buflen += byte_count;
412 return 1;
413}
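
/* Example of the tail copy (made-up sizes, assuming PAGE_SIZE = 4096):
 * if the inline content after 'position' is 1000 bytes and the read
 * data ended at page_offset = 3500, the first 596 bytes land on the
 * current page and the remaining 404 spill onto the next one via the
 * 'more' label. Tails larger than PAGE_SIZE are rejected up front.
 */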

static int rdma_read_chunks(struct svcxprt_rdma *xprt,
                            struct rpcrdma_msg *rmsgp,
                            struct svc_rqst *rqstp,
                            struct svc_rdma_op_ctxt *head)
{
        int page_no, ret;
        struct rpcrdma_read_chunk *ch;
        u32 handle, page_offset, byte_count;
        u32 position;
        u64 rs_offset;
        bool last;

        /* If no read list is present, return 0 */
        ch = svc_rdma_get_read_chunk(rmsgp);
        if (!ch)
                return 0;

        if (rdma_rcl_chunk_count(ch) > RPCSVC_MAXPAGES)
                return -EINVAL;

        /* The request is completed when the RDMA_READs complete. The
         * head context keeps all the pages that comprise the
         * request.
         */
        head->arg.head[0] = rqstp->rq_arg.head[0];
        head->arg.tail[0] = rqstp->rq_arg.tail[0];
        head->hdr_count = head->count;
        head->arg.page_base = 0;
        head->arg.page_len = 0;
        head->arg.len = rqstp->rq_arg.len;
        head->arg.buflen = rqstp->rq_arg.buflen;

        ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
        position = be32_to_cpu(ch->rc_position);

        /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
        if (position == 0) {
                head->arg.pages = &head->pages[0];
                page_offset = head->byte_len;
        } else {
                head->arg.pages = &head->pages[head->count];
                page_offset = 0;
        }

        ret = 0;
        page_no = 0;
        for (; ch->rc_discrim != xdr_zero; ch++) {
                if (be32_to_cpu(ch->rc_position) != position)
                        goto err;

                handle = be32_to_cpu(ch->rc_target.rs_handle);
                byte_count = be32_to_cpu(ch->rc_target.rs_length);
                xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
                                 &rs_offset);

                while (byte_count > 0) {
                        last = (ch + 1)->rc_discrim == xdr_zero;
                        ret = xprt->sc_reader(xprt, rqstp, head,
                                              &page_no, &page_offset,
                                              handle, byte_count,
                                              rs_offset, last);
                        if (ret < 0)
                                goto err;
                        byte_count -= ret;
                        rs_offset += ret;
                        head->arg.buflen += ret;
                }
        }

        /* Read list may need XDR round-up (see RFC 5666, s. 3.7) */
        if (page_offset & 3) {
                u32 pad = 4 - (page_offset & 3);

                head->arg.page_len += pad;
                head->arg.len += pad;
                head->arg.buflen += pad;
                page_offset += pad;
        }

        ret = 1;
        if (position && position < head->arg.head[0].iov_len)
                ret = rdma_copy_tail(rqstp, head, position,
                                     byte_count, page_offset, page_no);
        head->arg.head[0].iov_len = position;
        head->position = position;

 err:
        /* Detach arg pages. svc_recv will replenish them */
        for (page_no = 0;
             &rqstp->rq_pages[page_no] < rqstp->rq_respages; page_no++)
                rqstp->rq_pages[page_no] = NULL;

        return ret;
}
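
/* The 'position' value steers two layouts in rdma_read_chunks():
 * position == 0 means an RDMA_NOMSG-style request whose read data
 * must land directly after the RECV data (page_offset starts at
 * head->byte_len), while a non-zero position splices the chunk data
 * into the middle of the XDR stream, and any inline bytes that
 * followed it are moved after the read data by rdma_copy_tail().
 */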

static int rdma_read_complete(struct svc_rqst *rqstp,
                              struct svc_rdma_op_ctxt *head)
{
        int page_no;
        int ret;

        /* Copy RPC pages */
        for (page_no = 0; page_no < head->count; page_no++) {
                put_page(rqstp->rq_pages[page_no]);
                rqstp->rq_pages[page_no] = head->pages[page_no];
        }

        /* Adjustments made for RDMA_NOMSG type requests */
        if (head->position == 0) {
                if (head->arg.len <= head->sge[0].length) {
                        head->arg.head[0].iov_len = head->arg.len -
                                                    head->byte_len;
                        head->arg.page_len = 0;
                } else {
                        head->arg.head[0].iov_len = head->sge[0].length -
                                                    head->byte_len;
                        head->arg.page_len = head->arg.len -
                                             head->sge[0].length;
                }
        }

        /* Point rq_arg.pages past header */
        rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
        rqstp->rq_arg.page_len = head->arg.page_len;
        rqstp->rq_arg.page_base = head->arg.page_base;

        /* rq_respages starts after the last arg page */
        rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        /* Rebuild rq_arg head and tail. */
        rqstp->rq_arg.head[0] = head->arg.head[0];
        rqstp->rq_arg.tail[0] = head->arg.tail[0];
        rqstp->rq_arg.len = head->arg.len;
        rqstp->rq_arg.buflen = head->arg.buflen;

        /* Free the context */
        svc_rdma_put_context(head, 0);

        /* XXX: What should this be? */
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;
        dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%u, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n",
                ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);

        return ret;
}
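
/* Illustration of the RDMA_NOMSG fixup above (hypothetical sizes):
 * with head->byte_len = 120 bytes of RECV data, a first SGE of 4096
 * bytes, and head->arg.len = 4216 after the reads, arg.len exceeds
 * the first SGE, so head[0].iov_len becomes 4096 - 120 = 3976 and
 * page_len becomes 4216 - 4096 = 120: the head iovec covers the read
 * bytes that fit on the first page and the remainder stays in the
 * page list.
 */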

/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma_xprt =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct rpcrdma_msg *rmsgp;
        int ret = 0;
        int len;

        dprintk("svcrdma: rqstp=%p\n", rqstp);

        spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
        if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
                ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
                return rdma_read_complete(rqstp, ctxt);
        } else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
                ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
        } else {
                atomic_inc(&rdma_stat_rq_starve);
                clear_bit(XPT_DATA, &xprt->xpt_flags);
                ctxt = NULL;
        }
        spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
        if (!ctxt) {
                /* This is the EAGAIN path. The svc_recv routine will
                 * return -EAGAIN, the nfsd thread will call into
                 * svc_recv again, and we should not be on the active
                 * transport list.
                 */
                if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
                        goto close_out;

                goto out;
        }
        dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
                ctxt, rdma_xprt, rqstp, ctxt->wc_status);
        atomic_inc(&rdma_stat_recv);

        /* Build up the XDR from the receive buffers. */
        rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

        /* Decode the RDMA header. */
        len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
        rqstp->rq_xprt_hlen = len;

        /* If the request is invalid, reply with an error */
        if (len < 0) {
                if (len == -ENOSYS)
                        svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
                goto close_out;
        }

        /* Read read-list data. */
        ret = rdma_read_chunks(rdma_xprt, rmsgp, rqstp, ctxt);
        if (ret > 0) {
                /* read-list posted, defer until data received from client. */
                goto defer;
        } else if (ret < 0) {
                /* Post of read-list failed, free context. */
                svc_rdma_put_context(ctxt, 1);
                return 0;
        }

        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;
        svc_rdma_put_context(ctxt, 0);
 out:
        dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
                ret, rqstp->rq_arg.len,
                rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, xprt);
        return ret;

 close_out:
        if (ctxt)
                svc_rdma_put_context(ctxt, 1);
        dprintk("svcrdma: transport %p is closing\n", xprt);
        /*
         * Set the close bit and enqueue it. svc_recv will see the
         * close bit and call svc_xprt_delete
         */
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
defer:
        return 0;
}