/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

/* WR context cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_ctxt_cachep;

struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
		if (ctxt)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	ctxt->frmr = NULL;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}

void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;
	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_single(xprt->sc_cm_id->device,
					    ctxt->sge[i].addr,
					    ctxt->sge[i].length,
					    ctxt->direction);
		}
	}
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	BUG_ON(!ctxt);
	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}

/* Temporary NFS request map cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_map_cachep;

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;
	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	map->frmr = NULL;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;
	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		svc_rdma_put_context(ctxt, 1);
		break;

	case IB_WR_RDMA_WRITE:
		svc_rdma_put_context(ctxt, 0);
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
			BUG_ON(!read_hdr);
			if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
				svc_rdma_put_frmr(xprt, ctxt->frmr);
			spin_lock_bh(&xprt->sc_rq_dto_lock);
			set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
			list_add_tail(&read_hdr->dto_q,
				      &xprt->sc_read_complete_q);
			spin_unlock_bh(&xprt->sc_rq_dto_lock);
			svc_xprt_enqueue(&xprt->sc_xprt);
		}
		svc_rdma_put_context(ctxt, 0);
		break;

	default:
		printk(KERN_ERR "svcrdma: unexpected completion type, "
		       "opcode=%d\n",
		       ctxt->wr_op);
		break;
	}
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			/* Close the transport */
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

		/* Decrement used SQ WR count */
		atomic_dec(&xprt->sc_sq_count);
		wake_up(&xprt->sc_send_wait);

		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		if (ctxt)
			process_context(xprt, ctxt);

		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* If we can't get memory, wait a bit and try again */
		printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 "
		       "jiffies.\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}
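
/*
 * Post a receive WR that covers sc_max_req_size bytes of the inbound
 * RPC message in page-sized SGEs. The op ctxt pointer rides in wr_id
 * so that rq_cq_reap() can recover it when the receive completes.
 */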
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		BUG_ON(sge_no >= xprt->sc_max_sge);
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		buflen += PAGE_SIZE;
	}
	ctxt->count = sge_no;
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_xprt_put(&xprt->sc_xprt);
		svc_rdma_put_context(ctxt, 1);
	}
	return ret;

 err_put_ctxt:
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	/*
	 * Can't use svc_xprt_received here because we are not on a
	 * rqstp thread
	 */
	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events
 * will either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id,
				   event->param.conn.responder_resources);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");

	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}
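
/*
 * Fast-register MR (FRMR) pool helpers. svc_rdma_get_frmr() hands out
 * an entry from the transport's sc_frmr_q, falling back to a fresh
 * allocation; svc_rdma_put_frmr() unmaps any DMA the FRMR still holds
 * and returns it to the queue. rdma_dealloc_frmr_q() tears the queue
 * down when the transport is freed.
 */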
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *pl;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	/* These verbs allocators return ERR_PTR() on failure, not NULL */
	mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
	if (IS_ERR(mr))
		goto err_free_frmr;

	pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
					 RPCSVC_MAXPAGES);
	if (IS_ERR(pl))
		goto err_free_mr;

	frmr->mr = mr;
	frmr->page_list = pl;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		ib_dereg_mr(frmr->mr);
		ib_free_fast_reg_page_list(frmr->page_list);
		kfree(frmr);
	}
}

struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock_bh(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->map_len = 0;
		frmr->page_list_len = 0;
	}
	spin_unlock_bh(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
			   struct svc_rdma_fastreg_mr *frmr)
{
	int page_no;
	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
		dma_addr_t addr = frmr->page_list->page_list[page_no];
		if (ib_dma_mapping_error(frmr->mr->device, addr))
			continue;
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE,
				    frmr->direction);
	}
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		frmr_unmap_dma(rdma, frmr);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		BUG_ON(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock_bh(&rdma->sc_frmr_q_lock);
	}
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
821static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
822{
823 struct svcxprt_rdma *listen_rdma;
824 struct svcxprt_rdma *newxprt = NULL;
825 struct rdma_conn_param conn_param;
826 struct ib_qp_init_attr qp_attr;
827 struct ib_device_attr devattr;
Tom Tucker3a5c6382008-09-30 13:46:13 -0500828 int dma_mr_acc;
829 int need_dma_mr;
Tom Tucker377f9b22007-12-12 16:13:21 -0600830 int ret;
831 int i;
832
833 listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
834 clear_bit(XPT_CONN, &xprt->xpt_flags);
835 /* Get the next entry off the accept list */
836 spin_lock_bh(&listen_rdma->sc_lock);
837 if (!list_empty(&listen_rdma->sc_accept_q)) {
838 newxprt = list_entry(listen_rdma->sc_accept_q.next,
839 struct svcxprt_rdma, sc_accept_q);
840 list_del_init(&newxprt->sc_accept_q);
841 }
842 if (!list_empty(&listen_rdma->sc_accept_q))
843 set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
844 spin_unlock_bh(&listen_rdma->sc_lock);
845 if (!newxprt)
846 return NULL;
847
848 dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
849 newxprt, newxprt->sc_cm_id);
850
851 ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
852 if (ret) {
853 dprintk("svcrdma: could not query device attributes on "
854 "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
855 goto errout;
856 }
857
858 /* Qualify the transport resource defaults with the
859 * capabilities of this particular device */
860 newxprt->sc_max_sge = min((size_t)devattr.max_sge,
861 (size_t)RPCSVC_MAXPAGES);
862 newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
863 (size_t)svcrdma_max_requests);
864 newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;
865
Tom Tucker36ef25e2008-05-19 19:00:24 -0500866 /*
867 * Limit ORD based on client limit, local device limit, and
868 * configured svcrdma limit.
869 */
870 newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
871 newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);
Tom Tucker377f9b22007-12-12 16:13:21 -0600872
873 newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
874 if (IS_ERR(newxprt->sc_pd)) {
875 dprintk("svcrdma: error creating PD for connect request\n");
876 goto errout;
877 }
878 newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
879 sq_comp_handler,
880 cq_event_handler,
881 newxprt,
882 newxprt->sc_sq_depth,
883 0);
884 if (IS_ERR(newxprt->sc_sq_cq)) {
885 dprintk("svcrdma: error creating SQ CQ for connect request\n");
886 goto errout;
887 }
888 newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
889 rq_comp_handler,
890 cq_event_handler,
891 newxprt,
892 newxprt->sc_max_requests,
893 0);
894 if (IS_ERR(newxprt->sc_rq_cq)) {
895 dprintk("svcrdma: error creating RQ CQ for connect request\n");
896 goto errout;
897 }
898
899 memset(&qp_attr, 0, sizeof qp_attr);
900 qp_attr.event_handler = qp_event_handler;
901 qp_attr.qp_context = &newxprt->sc_xprt;
902 qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
903 qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
904 qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
905 qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
906 qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
907 qp_attr.qp_type = IB_QPT_RC;
908 qp_attr.send_cq = newxprt->sc_sq_cq;
909 qp_attr.recv_cq = newxprt->sc_rq_cq;
910 dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
911 " cm_id->device=%p, sc_pd->device=%p\n"
912 " cap.max_send_wr = %d\n"
913 " cap.max_recv_wr = %d\n"
914 " cap.max_send_sge = %d\n"
915 " cap.max_recv_sge = %d\n",
916 newxprt->sc_cm_id, newxprt->sc_pd,
917 newxprt->sc_cm_id->device, newxprt->sc_pd->device,
918 qp_attr.cap.max_send_wr,
919 qp_attr.cap.max_recv_wr,
920 qp_attr.cap.max_send_sge,
921 qp_attr.cap.max_recv_sge);
922
923 ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
924 if (ret) {
925 /*
926 * XXX: This is a hack. We need a xx_request_qp interface
927 * that will adjust the qp_attr's with a best-effort
928 * number
929 */
930 qp_attr.cap.max_send_sge -= 2;
931 qp_attr.cap.max_recv_sge -= 2;
932 ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
933 &qp_attr);
934 if (ret) {
935 dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
936 goto errout;
937 }
938 newxprt->sc_max_sge = qp_attr.cap.max_send_sge;
939 newxprt->sc_max_sge = qp_attr.cap.max_recv_sge;
940 newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
941 newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
942 }
943 newxprt->sc_qp = newxprt->sc_cm_id->qp;
944
	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			devattr.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
	}

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IWARP:
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
			need_dma_mr = 1;
			dma_mr_acc =
				(IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE);
		} else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	case RDMA_TRANSPORT_IB:
		if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	default:
		goto errout;
	}

	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
	if (need_dma_mr) {
		/* Register all of physical memory */
		newxprt->sc_phys_mr =
			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
		if (IS_ERR(newxprt->sc_phys_mr)) {
			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
				ret);
			goto errout;
		}
		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
	} else
		newxprt->sc_dma_lkey =
			newxprt->sc_cm_id->device->local_dma_lkey;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %d.%d.%d.%d\n"
		"    local_port      : %d\n"
		"    remote_ip       : %d.%d.%d.%d\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.src_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.dst_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);

	/* De-allocate fastreg mr */
	rdma_dealloc_frmr_q(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are fewer SQ WR available than required to send a
	 * simple response, return false.
	 */
	if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3))
		return 0;

	/*
	 * ...or there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

/*
 * Attempt to register the kvec representing the RPC memory with the
 * device.
 *
 * Returns:
 *	NULL	: The device does not support fastreg or there were no more
 *		  fastreg mr.
 *	frmr	: The kvec register request was successfully posted.
 *	<0	: An error was encountered attempting to register the kvec.
 */
int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
		     struct svc_rdma_fastreg_mr *frmr)
{
	struct ib_send_wr fastreg_wr;
	u8 key;

	/* Bump the key */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof fastreg_wr);
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.send_flags = IB_SEND_SIGNALED;
	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.length = frmr->map_len;
	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
	return svc_rdma_send(xprt, &fastreg_wr);
}
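
/*
 * Post one or more signaled send-queue WRs, flow-controlling against
 * sc_sq_depth and taking a transport reference per WR posted. The
 * reference is dropped by sq_cq_reap() when the WR completes.
 * svc_rdma_send_error() below is a minimal usage example: a single
 * IB_SEND_SIGNALED SEND WR whose wr_id carries the op ctxt.
 */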
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return 0;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		atomic_add(wr_count, &xprt->sc_sq_count);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			atomic_sub(wr_count, &xprt->sc_sq_count);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		if (ret)
			wake_up(&xprt->sc_send_wait);
		break;
	}
	return ret;
}

void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct ib_sge sge;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = svc_rdma_get_page();
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	/* Prepare SGE for local address */
	atomic_inc(&xprt->sc_dma_used);
	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	sge.lkey = xprt->sc_phys_mr->lkey;
	sge.length = length;

	ctxt = svc_rdma_get_context(xprt);
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = &sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_put_context(ctxt, 1);
	}
}