// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */
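
/* A rough sketch of the flow described above. svc_recv() lives in
 * net/sunrpc/svc_xprt.c and reaches this transport through the
 * xpo_recvfrom method:
 *
 *	svc_recv(rqstp)
 *	  svc_rdma_recvfrom(rqstp)
 *	    returns > 0: a complete RPC Call is in rqstp->rq_arg
 *	    returns 0:   RDMA Reads were posted; a later "Data Ready"
 *	                 event triggers the second svc_rdma_recvfrom call
 */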

#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
		list_del(&ctxt->rc_list);
		kfree(ctxt);
	}
}

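/* Fetch a recv_ctxt from the transport's free list, or allocate a
 * fresh one if the list is empty. Returns NULL only when allocation
 * fails. Either way, the returned ctxt has rc_recv_wr.num_sge and
 * rc_page_count reset to zero.
 */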
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	spin_lock(&rdma->sc_recv_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma->sc_recv_lock);

out:
	ctxt->rc_recv_wr.num_sge = 0;
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_recv_lock);

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return NULL;
	goto out;
}

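/* DMA-unmap every receive SGE that was mapped for @ctxt.
 * rc_recv_wr.num_sge counts only the successfully mapped SGEs, so
 * this is also safe after a partial mapping failure.
 */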
static void svc_rdma_recv_ctxt_unmap(struct svcxprt_rdma *rdma,
				     struct svc_rdma_recv_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	int i;

	for (i = 0; i < ctxt->rc_recv_wr.num_sge; i++)
		ib_dma_unmap_page(device,
				  ctxt->rc_sges[i].addr,
				  ctxt->rc_sges[i].length,
				  DMA_FROM_DEVICE);
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 * @free_pages: Non-zero if rc_pages should be freed
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt,
			    int free_pages)
{
	unsigned int i;

	if (free_pages)
		for (i = 0; i < ctxt->rc_page_count; i++)
			put_page(ctxt->rc_pages[i]);
	spin_lock(&rdma->sc_recv_lock);
	list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
	spin_unlock(&rdma->sc_recv_lock);
}

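/* Allocate and DMA-map enough pages to cover sc_max_req_size, then
 * post them as a single Receive WR. A reference on the svc_xprt
 * pins the transport while the WR is outstanding; it is dropped by
 * svc_rdma_wc_receive(), or in the error path here if posting fails.
 */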
static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	struct svc_rdma_recv_ctxt *ctxt;
	struct ib_recv_wr *bad_recv_wr;
	int sge_no, buflen, ret;
	struct page *page;
	dma_addr_t pa;

	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;

	buflen = 0;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	for (sge_no = 0; buflen < rdma->sc_max_req_size; sge_no++) {
		if (sge_no >= rdma->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto err_put_ctxt;
		ctxt->rc_pages[sge_no] = page;
		ctxt->rc_page_count++;

		pa = ib_dma_map_page(device, ctxt->rc_pages[sge_no],
				     0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device, pa))
			goto err_put_ctxt;
		ctxt->rc_sges[sge_no].addr = pa;
		ctxt->rc_sges[sge_no].length = PAGE_SIZE;
		ctxt->rc_sges[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
		ctxt->rc_recv_wr.num_sge++;

		buflen += PAGE_SIZE;
	}
	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_sges[0];
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;

	svc_xprt_get(&rdma->sc_xprt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr);
	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
	if (ret)
		goto err_post;
	return 0;

err_put_ctxt:
	svc_rdma_recv_ctxt_unmap(rdma, ctxt);
	svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
	return -ENOMEM;
err_post:
	svc_rdma_recv_ctxt_unmap(rdma, ctxt);
	svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
	svc_xprt_put(&rdma->sc_xprt);
	return ret;
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(rdma);
		if (ret) {
			pr_err("svcrdma: failure posting recv buffers: %d\n",
			       ret);
			return false;
		}
	}
	return true;
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	trace_svcrdma_wc_receive(wc);

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
	svc_rdma_recv_ctxt_unmap(rdma, ctxt);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	spin_unlock(&rdma->sc_rq_dto_lock);
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	goto out;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("svcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
out:
	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt, 1);
	}
}

/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct page *page;
	int sge_no;
	u32 len;

	/* The reply path assumes the Call's transport header resides
	 * in rqstp->rq_pages[0].
	 */
	page = ctxt->rc_pages[0];
	put_page(rqstp->rq_pages[0]);
	rqstp->rq_pages[0] = page;

	/* Set up the XDR head */
	rqstp->rq_arg.head[0].iov_base = page_address(page);
	rqstp->rq_arg.head[0].iov_len =
		min_t(size_t, ctxt->rc_byte_len, ctxt->rc_sges[0].length);
	rqstp->rq_arg.len = ctxt->rc_byte_len;
	rqstp->rq_arg.buflen = ctxt->rc_byte_len;

	/* Compute bytes past head in the SGL */
	len = ctxt->rc_byte_len - rqstp->rq_arg.head[0].iov_len;

	/* If data remains, store it in the pagelist */
	rqstp->rq_arg.page_len = len;
	rqstp->rq_arg.page_base = 0;

	sge_no = 1;
	while (len && sge_no < ctxt->rc_recv_wr.num_sge) {
		page = ctxt->rc_pages[sge_no];
		put_page(rqstp->rq_pages[sge_no]);
		rqstp->rq_pages[sge_no] = page;
		len -= min_t(u32, len, ctxt->rc_sges[sge_no].length);
		sge_no++;
	}
	rqstp->rq_respages = &rqstp->rq_pages[sge_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* If not all pages were used from the SGL, free the remaining ones */
	len = sge_no;
	while (sge_no < ctxt->rc_recv_wr.num_sge) {
		page = ctxt->rc_pages[sge_no++];
		put_page(page);
	}
	ctxt->rc_page_count = len;

	/* Set up tail */
	rqstp->rq_arg.tail[0].iov_base = NULL;
	rqstp->rq_arg.tail[0].iov_len = 0;
}

/* This accommodates the largest possible Write chunk,
 * in one segment.
 */
#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG	((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))

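/* The chunk lists are decoded using the wire format defined in
 * RFC 8166. Each entry in the Read list is a read segment, one XDR
 * word per row except for the 64-bit offset:
 *
 *	1 word:  list discriminator (non-zero: a segment follows)
 *	1 word:  position (byte offset into the RPC message)
 *	1 word:  rdma_handle (the client's R_key)
 *	1 word:  rdma_length (segment length in bytes)
 *	2 words: rdma_offset (64-bit remote address)
 */
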
/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
	u32 position;
	bool first;

	first = true;
	while (*p++ != xdr_zero) {
		if (first) {
			position = be32_to_cpup(p++);
			first = false;
		} else if (be32_to_cpup(p++) != position) {
			return NULL;
		}
		p++;	/* handle */
		if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}
	return p;
}

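/* A Write chunk, used by both the Write list and the Reply chunk,
 * is a counted array of plain segments (RFC 8166):
 *
 *	1 word:  segment count
 *	and then, for each segment:
 *	1 word:  rdma_handle (the client's R_key)
 *	1 word:  rdma_length (segment length in bytes)
 *	2 words: rdma_offset (64-bit remote address)
 */
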
/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
				     u32 maxlen)
{
	u32 i, segcount;

	segcount = be32_to_cpup(p++);
	for (i = 0; i < segcount; i++) {
		p++;	/* handle */
		if (be32_to_cpup(p++) > maxlen)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}

	return p;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following Reply chunk.
 */
static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
	u32 chcount;

	chcount = 0;
	while (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
		if (!p)
			return NULL;
		if (chcount++ > 1)
			return NULL;
	}
	return p;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following RPC header.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
	if (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
		if (!p)
			return NULL;
	}
	return p;
}

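/* Every RPC-over-RDMA transport header begins with four fixed XDR
 * words, which svc_rdma_xdr_decode_req() below checks first (see
 * RFC 8166):
 *
 *	1 word: rdma_xid	(matches the XID of the RPC Call)
 *	1 word: rdma_vers	(must be rpcrdma_version)
 *	1 word: rdma_credit	(requested credit limit)
 *	1 word: rdma_proc	(rdma_msg, rdma_nomsg, rdma_done, rdma_error)
 *
 * The Read list, Write list, and Reply chunk follow immediately,
 * at offset rpcrdma_fixed_maxsz.
 */
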
/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
	__be32 *p, *end, *rdma_argp;
	unsigned int hdr_len;

	/* Verify that there are enough bytes for header + something */
	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
		goto out_short;

	rdma_argp = rq_arg->head[0].iov_base;
	if (*(rdma_argp + 1) != rpcrdma_version)
		goto out_version;

	switch (*(rdma_argp + 3)) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;

	case rdma_done:
		goto out_drop;

	case rdma_error:
		goto out_drop;

	default:
		goto out_proc;
	}

	end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
	p = xdr_check_read_list(rdma_argp + 4, end);
	if (!p)
		goto out_inval;
	p = xdr_check_write_list(p, end);
	if (!p)
		goto out_inval;
	p = xdr_check_reply_chunk(p, end);
	if (!p)
		goto out_inval;
	if (p > end)
		goto out_inval;

	rq_arg->head[0].iov_base = p;
	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short(rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers(rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop(rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc(rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse(rdma_argp);
	return -EINVAL;
}

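/* Finish assembling an RPC Call in rqstp::rq_arg once its RDMA Read
 * I/O has completed: move the Read sink pages and the saved xdr_buf
 * out of the deferred recv_ctxt into the (possibly different)
 * svc_rqst that is handling this second svc_rdma_recvfrom call.
 */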
static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Copy RPC pages */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

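/* Send a transport-level rdma_error response to the client:
 * err_vers if it sent an unsupported RPC-over-RDMA protocol version,
 * otherwise err_chunk. The error reply carries no RPC message
 * payload, only the transport header built here.
 */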
static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
				__be32 *rdma_argp, int status)
{
	struct svc_rdma_op_ctxt *ctxt;
	__be32 *p, *err_msgp;
	unsigned int length;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return;
	err_msgp = page_address(page);

	p = err_msgp;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = xprt->sc_fc_credits;
	*p++ = rdma_error;
	switch (status) {
	case -EPROTONOSUPPORT:
		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p++ = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		*p++ = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}
	length = (unsigned long)p - (unsigned long)err_msgp;

	/* Map transport header; no RPC message payload */
	ctxt = svc_rdma_get_context(xprt);
	ret = svc_rdma_map_reply_hdr(xprt, ctxt, err_msgp, length);
	if (ret) {
		dprintk("svcrdma: Error %d mapping send for protocol error\n",
			ret);
		return;
	}

	ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
					  __be32 *rdma_resp)
{
	__be32 *p;

	if (!xprt->xpt_bc_xprt)
		return false;

	p = rdma_resp + 3;
	if (*p++ != rdma_msg)
		return false;

	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* XID sanity */
	if (*p++ != *rdma_resp)
		return false;
	/* call direction */
	if (*p == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	__be32 *p;
	int ret;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_backchannel_reply(xprt, p)) {
		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
					       &rqstp->rq_arg);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0);
		return ret;
	}

	p += rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		goto out_readchunk;

complete:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0);
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readchunk:
	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
	if (ret < 0)
		goto out_postfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0);
	return 0;

out_postfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 1);
	return ret;

out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 1);
	return 0;
}