/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
					   struct sockaddr *, int, int);
static void svc_rdma_bc_detach(struct svc_xprt *);
static void svc_rdma_bc_free(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_bc_ops = {
	.xpo_create = svc_rdma_bc_create,
	.xpo_detach = svc_rdma_bc_detach,
	.xpo_free = svc_rdma_bc_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_bc_class = {
	.xcl_name = "rdma-bc",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_bc_ops,
	.xcl_max_payload = (1024 - RPCRDMA_HDRLEN_MIN)
};

static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
					   struct net *net,
					   struct sockaddr *sa, int salen,
					   int flags)
{
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;

	cma_xprt = rdma_create_xprt(serv, 0);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
	serv->sv_bc_xprt = xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	return xprt;
}

static void svc_rdma_bc_detach(struct svc_xprt *xprt)
{
	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
}

static void svc_rdma_bc_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	if (xprt)
		kfree(rdma);
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

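/*
 * Allocate an operation context to track the resources (SGEs, pages,
 * FRMR) used by a single work request on this transport. The allocation
 * uses __GFP_NOFAIL, so it never returns NULL; sc_ctxt_used is bumped so
 * that transport teardown can detect leaked contexts.
 */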
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
				GFP_KERNEL | __GFP_NOFAIL);
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	ctxt->frmr = NULL;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}

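/*
 * Unmap the DMA mappings held by an operation context. Only SGEs mapped
 * with the transport's local DMA lkey are unmapped here; FRMR-registered
 * SGEs are left alone (see the comment in the loop below).
 */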
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;
	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_page(xprt->sc_cm_id->device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
}

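/*
 * Release an operation context. When @free_pages is set, the pages
 * attached to the context are released as well. The context is returned
 * to the kmem cache and sc_ctxt_used is decremented.
 */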
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short-lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;
	map = kmem_cache_alloc(svc_rdma_map_cachep,
			       GFP_KERNEL | __GFP_NOFAIL);
	map->count = 0;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;
	dprintk("svcrdma: received CQ event %s (%d), context=%p\n",
		ib_event_msg(event->event), event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
			"closing transport\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called in interrupt context, we
 * need to defer the handling of the I/O to a tasklet.
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		if (ctxt->frmr)
			pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 1);
		break;

	case IB_WR_RDMA_WRITE:
		if (ctxt->frmr)
			pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 0);
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		svc_rdma_put_frmr(xprt, ctxt->frmr);
		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
			if (read_hdr) {
				spin_lock_bh(&xprt->sc_rq_dto_lock);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_rq_dto_lock);
			} else {
				pr_err("svcrdma: ctxt->read_hdr == NULL\n");
			}
			svc_xprt_enqueue(&xprt->sc_xprt);
		}
		svc_rdma_put_context(ctxt, 0);
		break;

	default:
		printk(KERN_ERR "svcrdma: unexpected completion type, "
		       "opcode=%d\n",
		       ctxt->wr_op);
		break;
	}
}

/*
 * Send Queue Completion Handler - potentially called in interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc_a[6];
	struct ib_wc *wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	memset(wc_a, 0, sizeof(wc_a));

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
		int i;

		for (i = 0; i < ret; i++) {
			wc = &wc_a[i];
			if (wc->status != IB_WC_SUCCESS) {
				dprintk("svcrdma: sq wc err status %s (%d)\n",
					ib_wc_status_msg(wc->status),
					wc->status);

				/* Close the transport */
				set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			}

			/* Decrement used SQ WR count */
			atomic_dec(&xprt->sc_sq_count);
			wake_up(&xprt->sc_send_wait);

			ctxt = (struct svc_rdma_op_ctxt *)
				(unsigned long)wc->wr_id;
			if (ctxt)
				process_context(xprt, ctxt);

			svc_xprt_put(&xprt->sc_xprt);
		}
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

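/*
 * Send Queue interrupt handler.
 *
 * Like rq_comp_handler, this runs in interrupt context: it marks the
 * transport as having SQ completions pending, puts the transport on the
 * global DTO queue if it is not already there, and defers the actual CQ
 * reaping to the DTO tasklet.
 */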
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

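/*
 * Allocate and initialize an svcxprt_rdma: the accept, DTO, RQ DTO,
 * read-completion and FRMR lists, their locks, and the SQ wait queue.
 * @listener is non-zero for the listening endpoint, which gets
 * XPT_LISTENER set.
 */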
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

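/*
 * Post a single receive WR on the connection's RQ. Enough pages to
 * cover sc_max_req_size are allocated and DMA-mapped into the context's
 * SGE array. A transport reference is taken for the posted WR; it is
 * dropped here on error, or by rq_cq_reap when the completion is
 * processed.
 */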
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		if (sge_no >= xprt->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}
		page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

 err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id,
				   event->param.conn.initiator_depth);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return ret;
}

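/*
 * Handles events generated on connected (previously accepted) endpoints.
 * Connection establishment, disconnect, and device removal are the
 * interesting cases; disconnect and device removal mark the transport
 * closed and enqueue it so a server thread can tear it down.
 */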
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if (sa->sa_family != AF_INET) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);

	listen_id = rdma_create_id(&init_net, rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

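/*
 * Allocate a fast registration MR and the scatterlist used to map page
 * lists into it. The number of pages is capped at RPCSVC_MAXPAGES and
 * at the device's fast-reg page list limit (sc_frmr_pg_list_len).
 */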
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct scatterlist *sg;
	struct svc_rdma_fastreg_mr *frmr;
	u32 num_sg;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
	mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
	if (IS_ERR(mr))
		goto err_free_frmr;

	sg = kcalloc(RPCSVC_MAXPAGES, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto err_free_mr;

	sg_init_table(sg, RPCSVC_MAXPAGES);

	frmr->mr = mr;
	frmr->sg = sg;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		kfree(frmr->sg);
		ib_dereg_mr(frmr->mr);
		kfree(frmr);
	}
}

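/*
 * Get an FRMR for an RDMA Read: reuse a cached entry from sc_frmr_q
 * when one is available, otherwise allocate a new one.
 */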
struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock_bh(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->sg_nents = 0;
	}
	spin_unlock_bh(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

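/*
 * Return an FRMR to the transport's free list after unmapping its
 * scatterlist. A NULL frmr is ignored.
 */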
void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		ib_dma_unmap_sg(rdma->sc_cm_id->device,
				frmr->sg, frmr->sg_nents, frmr->direction);
		atomic_dec(&rdma->sc_dma_used);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock_bh(&rdma->sc_frmr_q_lock);
	}
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_cq_init_attr cq_attr = {};
	struct ib_qp_init_attr qp_attr;
	struct ib_device *dev;
	int uninitialized_var(dma_mr_acc);
	int need_dma_mr = 0;
	int ret = 0;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	dev = newxprt->sc_cm_id->device;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
				       RPCSVC_MAXPAGES);
	newxprt->sc_max_req_size = svcrdma_max_req_size;
	newxprt->sc_max_requests = min((size_t)dev->attrs.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, dev->attrs.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(dev);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	cq_attr.cqe = newxprt->sc_sq_depth;
	newxprt->sc_sq_cq = ib_create_cq(dev,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 &cq_attr);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	cq_attr.cqe = newxprt->sc_max_requests;
	newxprt->sc_rq_cq = ib_create_cq(dev,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 &cq_attr);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		dev, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	newxprt->sc_reader = rdma_read_chunk_lcl;
	if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			dev->attrs.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
		newxprt->sc_reader = rdma_read_chunk_frmr;
	}

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	if (!rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_cm_id->port_num))
		goto errout;

	if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
	    !(dev->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
		need_dma_mr = 1;
		dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
		    !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
			dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
	}

	if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num))
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;

	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
	if (need_dma_mr) {
		/* Register all of physical memory */
		newxprt->sc_phys_mr =
			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
		if (IS_ERR(newxprt->sc_phys_mr)) {
			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
				ret);
			goto errout;
		}
		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
	} else
		newxprt->sc_dma_lkey = dev->local_dma_lkey;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %pI4\n"
		"    local_port      : %d\n"
		"    remote_ip       : %pI4\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    max_sge_rd      : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.src_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.dst_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_max_sge_rd,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum, one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	if (atomic_read(&rdma->sc_ctxt_used) != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       atomic_read(&rdma->sc_ctxt_used));
	if (atomic_read(&rdma->sc_dma_used) != 0)
		pr_err("svcrdma: dma still in use? (%d)\n",
		       atomic_read(&rdma->sc_dma_used));

	/* De-allocate fastreg mr */
	rdma_dealloc_frmr_q(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

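/*
 * The final reference on an svc_xprt can be dropped from contexts that
 * must not sleep (for example, the DTO tasklet calls svc_xprt_put), so
 * the actual teardown is deferred to __svc_rdma_free running on the
 * svc_rdma_wq workqueue.
 */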
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	return 1;
}

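/*
 * Post a chain of send WRs on the SQ. If the SQ cannot hold the whole
 * chain, opportunistically reap completed SQ entries and wait until
 * enough slots are free. One transport reference is taken per WR; each
 * is dropped when the corresponding completion is reaped, or here if
 * the post fails.
 */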
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		atomic_add(wr_count, &xprt->sc_sq_count);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			atomic_sub(wr_count, &xprt->sc_sq_count);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		if (ret)
			wake_up(&xprt->sc_send_wait);
		break;
	}
	return ret;
}

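/*
 * Send an RPC/RDMA protocol error response. The error is XDR-encoded
 * into a newly allocated page, which is DMA-mapped and posted as a
 * single-SGE SEND on this connection.
 */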
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	__be32 *va;
	int length;
	int ret;

	p = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		put_page(p);
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	atomic_inc(&xprt->sc_dma_used);
	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
	ctxt->sge[0].length = length;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}