/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

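/*
 * Per-WR context handling. A context carries the SGE list, pages, and
 * completion state for one work request. Contexts come from the
 * svc_rdma_ctxt_cachep slab with __GFP_NOFAIL, so allocation cannot
 * fail here; sc_ctxt_used counts outstanding contexts per transport so
 * leaks can be reported at transport teardown.
 */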
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
				GFP_KERNEL | __GFP_NOFAIL);
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	ctxt->frmr = NULL;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}

void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;
	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_page(xprt->sc_cm_id->device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;
	map = kmem_cache_alloc(svc_rdma_map_cachep,
			       GFP_KERNEL | __GFP_NOFAIL);
	map->count = 0;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;
	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		if (ctxt->frmr)
			pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 1);
		break;

	case IB_WR_RDMA_WRITE:
		if (ctxt->frmr)
			pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 0);
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		svc_rdma_put_frmr(xprt, ctxt->frmr);
		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
			if (read_hdr) {
				spin_lock_bh(&xprt->sc_rq_dto_lock);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_rq_dto_lock);
			} else {
				pr_err("svcrdma: ctxt->read_hdr == NULL\n");
			}
			svc_xprt_enqueue(&xprt->sc_xprt);
		}
		svc_rdma_put_context(ctxt, 0);
		break;

	default:
		printk(KERN_ERR "svcrdma: unexpected completion type, "
		       "opcode=%d\n",
		       ctxt->wr_op);
		break;
	}
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc_a[6];
	struct ib_wc *wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	memset(wc_a, 0, sizeof(wc_a));

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
		int i;

		for (i = 0; i < ret; i++) {
			wc = &wc_a[i];
			if (wc->status != IB_WC_SUCCESS) {
				dprintk("svcrdma: sq wc err status %d\n",
					wc->status);

				/* Close the transport */
				set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			}

			/* Decrement used SQ WR count */
			atomic_dec(&xprt->sc_sq_count);
			wake_up(&xprt->sc_send_wait);

			ctxt = (struct svc_rdma_op_ctxt *)
				(unsigned long)wc->wr_id;
			if (ctxt)
				process_context(xprt, ctxt);

			svc_xprt_put(&xprt->sc_xprt);
		}
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

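/*
 * Allocate and initialize a new svcxprt_rdma. Limits are seeded from
 * the svcrdma module parameters (svcrdma_ord, svcrdma_max_req_size,
 * svcrdma_max_requests), and the SQ depth is svcrdma_max_requests
 * scaled by RPCRDMA_SQ_DEPTH_MULT. These values may be reduced later
 * in svc_rdma_accept() based on device capabilities.
 */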
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

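/*
 * Post a single receive WR. A fresh op context is filled with up to
 * sc_max_sge page-sized SGEs covering sc_max_req_size bytes of receive
 * space; each page is DMA-mapped and the WR is posted to the QP. A
 * transport reference is taken for the pending WR and dropped again if
 * the post fails.
 */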
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		if (sge_no >= xprt->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}
		page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

 err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id,
				   event->param.conn.initiator_depth);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if (sa->sa_family != AF_INET) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
				   IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *pl;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
	if (IS_ERR(mr))
		goto err_free_frmr;

	pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
					 RPCSVC_MAXPAGES);
	if (IS_ERR(pl))
		goto err_free_mr;

	frmr->mr = mr;
	frmr->page_list = pl;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		ib_dereg_mr(frmr->mr);
		ib_free_fast_reg_page_list(frmr->page_list);
		kfree(frmr);
	}
}

struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock_bh(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->map_len = 0;
		frmr->page_list_len = 0;
	}
	spin_unlock_bh(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
			   struct svc_rdma_fastreg_mr *frmr)
{
	int page_no;
	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
		dma_addr_t addr = frmr->page_list->page_list[page_no];
		if (ib_dma_mapping_error(frmr->mr->device, addr))
			continue;
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
				  frmr->direction);
	}
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		frmr_unmap_dma(rdma, frmr);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock_bh(&rdma->sc_frmr_q_lock);
	}
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int uninitialized_var(dma_mr_acc);
	int need_dma_mr;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	newxprt->sc_reader = rdma_read_chunk_lcl;
	if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			devattr.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
		newxprt->sc_reader = rdma_read_chunk_frmr;
	}

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IWARP:
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
			need_dma_mr = 1;
			dma_mr_acc =
				(IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE);
		} else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	case RDMA_TRANSPORT_IB:
		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else if (!(devattr.device_cap_flags &
			     IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	default:
		goto errout;
	}

	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
	if (need_dma_mr) {
		/* Register all of physical memory */
		newxprt->sc_phys_mr =
			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
		if (IS_ERR(newxprt->sc_phys_mr)) {
			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
				ret);
			goto errout;
		}
		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
	} else
		newxprt->sc_dma_lkey =
			newxprt->sc_cm_id->device->local_dma_lkey;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %pI4\n"
		"    local_port      : %d\n"
		"    remote_ip       : %pI4\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			route.addr.src_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
			route.addr.src_addr)->sin_port),
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			route.addr.dst_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
			route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	if (atomic_read(&rdma->sc_ctxt_used) != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       atomic_read(&rdma->sc_ctxt_used));
	if (atomic_read(&rdma->sc_dma_used) != 0)
		pr_err("svcrdma: dma still in use? (%d)\n",
		       atomic_read(&rdma->sc_dma_used));

	/* De-allocate fastreg mr */
	rdma_dealloc_frmr_q(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	return 1;
}

/*
 * Attempt to register the kvec representing the RPC memory with the
 * device.
 *
 * Returns:
 *  NULL : The device does not support fastreg or there were no more
 *         fastreg MRs available.
 *  frmr : The kvec register request was successfully posted.
 *  <0   : An error was encountered attempting to register the kvec.
 */
int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
		     struct svc_rdma_fastreg_mr *frmr)
{
	struct ib_send_wr fastreg_wr;
	u8 key;

	/* Bump the key */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof fastreg_wr);
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.send_flags = IB_SEND_SIGNALED;
	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.length = frmr->map_len;
	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
	return svc_rdma_send(xprt, &fastreg_wr);
}

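/*
 * Post a chain of send-side WRs (SEND, RDMA_WRITE, RDMA_READ, or
 * FAST_REG). The chain is counted first; if the SQ cannot hold it, the
 * caller opportunistically reaps completed SQ entries and then sleeps
 * on sc_send_wait until space frees up. One transport reference is
 * taken per WR posted and released by the SQ completion path.
 */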
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		atomic_add(wr_count, &xprt->sc_sq_count);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			atomic_sub(wr_count, &xprt->sc_sq_count);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		if (ret)
			wake_up(&xprt->sc_send_wait);
		break;
	}
	return ret;
}

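/*
 * Send an RPC/RDMA error reply. The error is XDR-encoded into a newly
 * allocated page, which is DMA-mapped and posted as a single-SGE
 * signaled SEND on the connection's QP via svc_rdma_send().
 */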
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	__be32 *va;
	int length;
	int ret;

	p = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		put_page(p);
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	atomic_inc(&xprt->sc_dma_used);
	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
	ctxt->sge[0].length = length;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}