/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

/* WR context cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_ctxt_cachep;

struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
		if (ctxt)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}

static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_single(xprt->sc_cm_id->device,
				    ctxt->sge[i].addr,
				    ctxt->sge[i].length,
				    ctxt->direction);
	}
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	BUG_ON(!ctxt);
	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}
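
/*
 * Usage sketch (editor's illustration; "rdma" and "wr" are hypothetical
 * locals, not code in this file): callers pair the get/put calls around
 * a posted work request and carry the context pointer through the HCA
 * as the 64-bit wr_id, which the CQ reapers below cast back:
 *
 *	ctxt = svc_rdma_get_context(rdma);
 *	ctxt->direction = DMA_TO_DEVICE;
 *	wr.wr_id = (u64)(unsigned long)ctxt;
 *	ret = svc_rdma_send(rdma, &wr);
 *	if (ret)
 *		svc_rdma_put_context(ctxt, 0);
 *
 * On success the matching put happens in sq_cq_reap() when the
 * completion arrives.
 */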

/* Temporary NFS request map cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_map_cachep;

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;

	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}
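
/*
 * Sizing sketch (editor's note, with hypothetical numbers): if 32
 * server threads run against transports with an SQ depth of 128 WRs,
 * the bound stated above works out to at most 32 * 128 = 4096 live
 * request maps.
 */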

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}
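
/*
 * Flow sketch (editor's summary of the code above and below):
 *
 *	interrupt:  rq_comp_handler() / sq_comp_handler()
 *	            -> set RDMAXPRT_{RQ,SQ}_PENDING
 *	            -> add xprt to dto_xprt_q under dto_lock
 *	            -> tasklet_schedule(&dto_tasklet)
 *	softirq:    dto_tasklet_func()
 *	            -> rq_cq_reap() / sq_cq_reap() with dto_lock dropped
 */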

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completed WCs off the CQ and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		xprt = ctxt->xprt;

		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS)
			/* Close the transport */
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

		/* Decrement used SQ WR count */
		atomic_dec(&xprt->sc_sq_count);
		wake_up(&xprt->sc_send_wait);

		switch (ctxt->wr_op) {
		case IB_WR_SEND:
			svc_rdma_put_context(ctxt, 1);
			break;

		case IB_WR_RDMA_WRITE:
			svc_rdma_put_context(ctxt, 0);
			break;

		case IB_WR_RDMA_READ:
			if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
				struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
				BUG_ON(!read_hdr);
				spin_lock_bh(&xprt->sc_rq_dto_lock);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_rq_dto_lock);
				svc_xprt_enqueue(&xprt->sc_xprt);
			}
			svc_rdma_put_context(ctxt, 0);
			break;

		default:
			printk(KERN_ERR "svcrdma: unexpected completion type, "
			       "opcode=%d, status=%d\n",
			       wc.opcode, wc.status);
			break;
		}
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}
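
/*
 * Worked example (editor's note, assuming the historical defaults of
 * svcrdma_max_requests = 16 and RPCRDMA_SQ_DEPTH_MULT = 8): a new
 * transport starts with sc_sq_depth = 16 * 8 = 128 send WRs before
 * the device-specific clamping done in svc_rdma_accept().
 */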

struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* If we can't get memory, wait a bit and try again */
		printk(KERN_INFO "svcrdma: out of memory...retrying in "
		       "1000 ms.\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}

int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	unsigned long pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		BUG_ON(sge_no >= xprt->sc_max_sge);
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		atomic_inc(&xprt->sc_dma_used);
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
		buflen += PAGE_SIZE;
	}
	ctxt->count = sge_no;
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_xprt_put(&xprt->sc_xprt);
		svc_rdma_put_context(ctxt, 1);
	}
	return ret;
}
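
/*
 * Worked example (editor's note): with PAGE_SIZE = 4096 and
 * sc_max_req_size = 16384, the loop above maps 16384 / 4096 = 4
 * pages into 4 sges for one receive WR; the BUG_ON enforces that
 * this count stays within sc_max_sge.
 */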

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	/*
	 * Can't use svc_xprt_received here because we are not on a
	 * rqstp thread
	 */
	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events
 * will either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id,
				   event->param.conn.responder_resources);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");

	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}
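
/*
 * Usage note (editor's illustration): this xpo_create method runs when
 * the "rdma" class registered via svc_rdma.c (svc_reg_xprt_class) is
 * asked to listen, e.g. when an administrator adds an RDMA listener,
 * conventionally on port 20049, to the NFS server's port list.
 */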

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		/*
		 * XXX: This is a hack. We need a xx_request_qp interface
		 * that will adjust the qp_attr's with a best-effort
		 * number
		 */
		qp_attr.cap.max_send_sge -= 2;
		qp_attr.cap.max_recv_sge -= 2;
		ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
				     &qp_attr);
		if (ret) {
			dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
			goto errout;
		}
		/* Record the smaller of the adjusted send/recv sge limits */
		newxprt->sc_max_sge = min(qp_attr.cap.max_send_sge,
					  qp_attr.cap.max_recv_sge);
		newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
		newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/* Register all of physical memory */
	newxprt->sc_phys_mr = ib_get_dma_mr(newxprt->sc_pd,
					    IB_ACCESS_LOCAL_WRITE |
					    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(newxprt->sc_phys_mr)) {
		dprintk("svcrdma: Failed to create DMA MR ret=%d\n", ret);
		goto errout;
	}

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %d.%d.%d.%d\n"
		"    local_port      : %d\n"
		"    remote_ip       : %d.%d.%d.%d\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.src_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.dst_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}
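
/*
 * Worked example (editor's note, hypothetical device limits): a device
 * advertising max_sge = 32 and max_qp_wr = 16384 leaves sc_max_sge =
 * min(32, RPCSVC_MAXPAGES) and sc_max_requests = min(16384,
 * svcrdma_max_requests), so the configured defaults, not the hardware,
 * usually bound the queue sizes chosen above.
 */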

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are fewer SQ WR available than required to send a
	 * simple response, return false.
	 */
	if (rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3)
		return 0;

	/*
	 * ...or there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}
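
/*
 * Worked example (editor's note): with sc_sq_depth = 128 and
 * sc_sq_count = 126, only 128 - 126 = 2 WRs remain, which is < 3,
 * so this returns 0 and the generic server code will not hand the
 * transport more work until completions drain the SQ.
 */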

int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
	BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
	       wr->opcode);
	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return 0;
			continue;
		}
		/* Bump used SQ WR count and post */
		svc_xprt_get(&xprt->sc_xprt);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (!ret)
			atomic_inc(&xprt->sc_sq_count);
		else {
			svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		break;
	}
	return ret;
}
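
/*
 * Usage sketch (editor's illustration; the locals are hypothetical): a
 * caller builds a signaled WR whose wr_id is its op context, matching
 * the BUG_ONs above:
 *
 *	memset(&wr, 0, sizeof wr);
 *	ctxt->wr_op = IB_WR_SEND;
 *	wr.wr_id = (unsigned long)ctxt;
 *	wr.opcode = IB_WR_SEND;
 *	wr.send_flags = IB_SEND_SIGNALED;
 *	ret = svc_rdma_send(rdma, &wr);
 *
 * svc_rdma_send_error() below follows exactly this pattern.
 */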

void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct ib_sge sge;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = svc_rdma_get_page();
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	/* Prepare SGE for local address; the page is read by the device */
	atomic_inc(&xprt->sc_dma_used);
	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
				   p, 0, PAGE_SIZE, DMA_TO_DEVICE);
	sge.lkey = xprt->sc_phys_mr->lkey;
	sge.length = length;

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;
	/* Record the mapping so svc_rdma_unmap_dma() can undo it */
	ctxt->sge[0] = sge;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = &sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_put_context(ctxt, 1);
	}
}