/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY        RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct sockaddr *sa, int salen,
                                        int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

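/*
 * All RQ and SQ completions are funneled through a single global
 * tasklet.  dto_xprt_q is the list of transports with completions
 * pending, and dto_lock serializes access to it between the
 * completion handlers and the tasklet.
 */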
DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create = svc_rdma_create,
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_release_rqst = svc_rdma_release_rqst,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_has_wspace = svc_rdma_has_wspace,
        .xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
        .xcl_name = "rdma",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

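/*
 * Grow the per-transport context cache toward sc_ctxt_max by
 * sc_ctxt_bump entries.  The ctxt lock is dropped around each kmalloc
 * so the allocation can sleep.  Returns non-zero if at least one new
 * context was added to the free list.
 */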
static int rdma_bump_context_cache(struct svcxprt_rdma *xprt)
{
        int target;
        int at_least_one = 0;
        struct svc_rdma_op_ctxt *ctxt;

        target = min(xprt->sc_ctxt_cnt + xprt->sc_ctxt_bump,
                     xprt->sc_ctxt_max);

        spin_lock_bh(&xprt->sc_ctxt_lock);
        while (xprt->sc_ctxt_cnt < target) {
                xprt->sc_ctxt_cnt++;
                spin_unlock_bh(&xprt->sc_ctxt_lock);

                ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);

                spin_lock_bh(&xprt->sc_ctxt_lock);
                if (ctxt) {
                        at_least_one = 1;
                        INIT_LIST_HEAD(&ctxt->free_list);
                        list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
                } else {
                        /* kmalloc failed...give up for now */
                        xprt->sc_ctxt_cnt--;
                        break;
                }
        }
        spin_unlock_bh(&xprt->sc_ctxt_lock);
        dprintk("svcrdma: sc_ctxt_max=%d, sc_ctxt_cnt=%d\n",
                xprt->sc_ctxt_max, xprt->sc_ctxt_cnt);
        return at_least_one;
}

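/*
 * Take a context off the transport's free list, growing the cache if
 * it is empty.  If no memory is available the caller sleeps and
 * retries, so this never returns NULL.
 */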
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt;

        while (1) {
                spin_lock_bh(&xprt->sc_ctxt_lock);
                if (unlikely(list_empty(&xprt->sc_ctxt_free))) {
                        /* Try to bump my cache. */
                        spin_unlock_bh(&xprt->sc_ctxt_lock);

                        if (rdma_bump_context_cache(xprt))
                                continue;

                        printk(KERN_INFO "svcrdma: sleeping waiting for "
                               "context memory on xprt=%p\n",
                               xprt);
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        continue;
                }
                ctxt = list_entry(xprt->sc_ctxt_free.next,
                                  struct svc_rdma_op_ctxt,
                                  free_list);
                list_del_init(&ctxt->free_list);
                spin_unlock_bh(&xprt->sc_ctxt_lock);
                ctxt->xprt = xprt;
                INIT_LIST_HEAD(&ctxt->dto_q);
                ctxt->count = 0;
                atomic_inc(&xprt->sc_ctxt_used);
                break;
        }
        return ctxt;
}

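/*
 * Return a context to the transport's free list.  Any pages attached
 * to the context are optionally released, and its SGEs are DMA
 * unmapped before it is made available for reuse.
 */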
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
        struct svcxprt_rdma *xprt;
        int i;

        BUG_ON(!ctxt);
        xprt = ctxt->xprt;
        if (free_pages)
                for (i = 0; i < ctxt->count; i++)
                        put_page(ctxt->pages[i]);

        for (i = 0; i < ctxt->count; i++)
                ib_dma_unmap_single(xprt->sc_cm_id->device,
                                    ctxt->sge[i].addr,
                                    ctxt->sge[i].length,
                                    ctxt->direction);

        spin_lock_bh(&xprt->sc_ctxt_lock);
        list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
        spin_unlock_bh(&xprt->sc_ctxt_lock);
        atomic_dec(&xprt->sc_ctxt_used);
}

/* Temporary NFS request map cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_map_cachep;

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
        struct svc_rdma_req_map *map;
        while (1) {
                map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
                if (map)
                        break;
                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
        }
        map->count = 0;
        return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
        kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;
        dprintk("svcrdma: received CQ event id=%d, context=%p\n",
                event->event, context);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        switch (event->event) {
        /* These are considered benign events */
        case IB_EVENT_PATH_MIG:
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
                dprintk("svcrdma: QP event %d received for QP=%p\n",
                        event->event, event->element.qp);
                break;
        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
        case IB_EVENT_QP_FATAL:
        case IB_EVENT_QP_REQ_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
                dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
                        "closing transport\n",
                        event->event, event->element.qp);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                break;
        }
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
        struct svcxprt_rdma *xprt;
        unsigned long flags;

        spin_lock_irqsave(&dto_lock, flags);
        while (!list_empty(&dto_xprt_q)) {
                xprt = list_entry(dto_xprt_q.next,
                                  struct svcxprt_rdma, sc_dto_q);
                list_del_init(&xprt->sc_dto_q);
                spin_unlock_irqrestore(&dto_lock, flags);

                rq_cq_reap(xprt);
                sq_cq_reap(xprt);

                svc_xprt_put(&xprt->sc_xprt);
                spin_lock_irqsave(&dto_lock, flags);
        }
        spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called in interrupt context, we
 * need to defer the handling of the I/O to a tasklet.
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
                return;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an SQ
         * completion.
         */
        set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WCs off the CQ and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
        int ret;
        struct ib_wc wc;
        struct svc_rdma_op_ctxt *ctxt = NULL;

        if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_rq_poll);

        while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
                ctxt->wc_status = wc.status;
                ctxt->byte_len = wc.byte_len;
                if (wc.status != IB_WC_SUCCESS) {
                        /* Close the transport */
                        dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        svc_rdma_put_context(ctxt, 1);
                        svc_xprt_put(&xprt->sc_xprt);
                        continue;
                }
                spin_lock_bh(&xprt->sc_rq_dto_lock);
                list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
                spin_unlock_bh(&xprt->sc_rq_dto_lock);
                svc_xprt_put(&xprt->sc_xprt);
        }

        if (ctxt)
                atomic_inc(&rdma_stat_rq_prod);

        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        /*
         * If data arrived before established event,
         * don't enqueue. This defers RPC I/O until the
         * RDMA connection is complete.
         */
        if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
                svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Send Queue Completion Handler - potentially called in interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct ib_wc wc;
        struct ib_cq *cq = xprt->sc_sq_cq;
        int ret;

        if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_sq_poll);
        while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
                xprt = ctxt->xprt;

                if (wc.status != IB_WC_SUCCESS)
                        /* Close the transport */
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

                /* Decrement used SQ WR count */
                atomic_dec(&xprt->sc_sq_count);
                wake_up(&xprt->sc_send_wait);

                switch (ctxt->wr_op) {
                case IB_WR_SEND:
                case IB_WR_RDMA_WRITE:
                        svc_rdma_put_context(ctxt, 1);
                        break;

                case IB_WR_RDMA_READ:
                        if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
                                struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
                                BUG_ON(!read_hdr);
                                set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
                                spin_lock_bh(&xprt->sc_read_complete_lock);
                                list_add_tail(&read_hdr->dto_q,
                                              &xprt->sc_read_complete_q);
                                spin_unlock_bh(&xprt->sc_read_complete_lock);
                                svc_xprt_enqueue(&xprt->sc_xprt);
                        }
                        svc_rdma_put_context(ctxt, 0);
                        break;

                default:
                        printk(KERN_ERR "svcrdma: unexpected completion type, "
                               "opcode=%d, status=%d\n",
                               wc.opcode, wc.status);
                        break;
                }
                svc_xprt_put(&xprt->sc_xprt);
        }

        if (ctxt)
                atomic_inc(&rdma_stat_sq_prod);
}

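/*
 * Send Queue Completion Handler
 *
 * Mirrors rq_comp_handler() above: mark the SQ as pending, queue the
 * transport on the DTO list if it is not there already, and let the
 * tasklet do the actual reaping.
 */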
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
                return;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an RQ
         * completion.
         */
        set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}

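/*
 * Pre-allocate ctxt_count contexts for a new transport and record the
 * bump increment and hard maximum used by rdma_bump_context_cache().
 * Allocation failures here are tolerated; the cache simply starts out
 * smaller than requested.
 */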
static void create_context_cache(struct svcxprt_rdma *xprt,
                                 int ctxt_count, int ctxt_bump, int ctxt_max)
{
        struct svc_rdma_op_ctxt *ctxt;
        int i;

        xprt->sc_ctxt_max = ctxt_max;
        xprt->sc_ctxt_bump = ctxt_bump;
        xprt->sc_ctxt_cnt = 0;
        atomic_set(&xprt->sc_ctxt_used, 0);

        INIT_LIST_HEAD(&xprt->sc_ctxt_free);
        for (i = 0; i < ctxt_count; i++) {
                ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
                if (ctxt) {
                        INIT_LIST_HEAD(&ctxt->free_list);
                        list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
                        xprt->sc_ctxt_cnt++;
                }
        }
}

static void destroy_context_cache(struct svcxprt_rdma *xprt)
{
        while (!list_empty(&xprt->sc_ctxt_free)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_entry(xprt->sc_ctxt_free.next,
                                  struct svc_rdma_op_ctxt,
                                  free_list);
                list_del_init(&ctxt->free_list);
                kfree(ctxt);
        }
}

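/*
 * Allocate and initialize an svcxprt_rdma.  Listening transports skip
 * the context cache since they never move data; connected transports
 * fail the allocation outright if not even one context could be
 * pre-allocated.
 */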
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
                                             int listener)
{
        struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

        if (!cma_xprt)
                return NULL;
        svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
        INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
        init_waitqueue_head(&cma_xprt->sc_send_wait);

        spin_lock_init(&cma_xprt->sc_lock);
        spin_lock_init(&cma_xprt->sc_read_complete_lock);
        spin_lock_init(&cma_xprt->sc_ctxt_lock);
        spin_lock_init(&cma_xprt->sc_rq_dto_lock);

        cma_xprt->sc_ord = svcrdma_ord;

        cma_xprt->sc_max_req_size = svcrdma_max_req_size;
        cma_xprt->sc_max_requests = svcrdma_max_requests;
        cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
        atomic_set(&cma_xprt->sc_sq_count, 0);

        if (!listener) {
                int reqs = cma_xprt->sc_max_requests;
                create_context_cache(cma_xprt,
                                     reqs << 1, /* starting size */
                                     reqs,      /* bump amount */
                                     reqs +
                                     cma_xprt->sc_sq_depth +
                                     RPCRDMA_MAX_THREADS + 1); /* max */
                if (list_empty(&cma_xprt->sc_ctxt_free)) {
                        kfree(cma_xprt);
                        return NULL;
                }
                clear_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
        } else
                set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

        return cma_xprt;
}

struct page *svc_rdma_get_page(void)
{
        struct page *page;

        while ((page = alloc_page(GFP_KERNEL)) == NULL) {
                /* If we can't get memory, wait a bit and try again */
                printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 "
                       "jiffies.\n");
                schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
        }
        return page;
}

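/*
 * Build and post a single receive WR.  Enough full pages are mapped
 * to cover sc_max_req_size, and a transport reference is taken for
 * the posted WR; it is dropped again in rq_cq_reap() when the WR
 * completes, or immediately here if the post fails.
 */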
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *page;
        unsigned long pa;
        int sge_no;
        int buflen;
        int ret;

        ctxt = svc_rdma_get_context(xprt);
        buflen = 0;
        ctxt->direction = DMA_FROM_DEVICE;
        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
                BUG_ON(sge_no >= xprt->sc_max_sge);
                page = svc_rdma_get_page();
                ctxt->pages[sge_no] = page;
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
                ctxt->sge[sge_no].addr = pa;
                ctxt->sge[sge_no].length = PAGE_SIZE;
                ctxt->sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
                buflen += PAGE_SIZE;
        }
        ctxt->count = sge_no;
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
        recv_wr.wr_id = (u64)(unsigned long)ctxt;

        svc_xprt_get(&xprt->sc_xprt);
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
        if (ret) {
                svc_xprt_put(&xprt->sc_xprt);
                svc_rdma_put_context(ctxt, 1);
        }
        return ret;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id)
{
        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
        struct svcxprt_rdma *newxprt;
        struct sockaddr *sa;

        /* Create a new transport */
        newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
        if (!newxprt) {
                dprintk("svcrdma: failed to create new transport\n");
                return;
        }
        newxprt->sc_cm_id = new_cma_id;
        new_cma_id->context = newxprt;
        dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
                newxprt, newxprt->sc_cm_id, listen_xprt);

        /* Set the local and remote addresses in the transport */
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

        /*
         * Enqueue the new transport on the accept queue of the listening
         * transport
         */
        spin_lock_bh(&listen_xprt->sc_lock);
        list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
        spin_unlock_bh(&listen_xprt->sc_lock);

        /*
         * Can't use svc_xprt_received here because we are not on a
         * rqstp thread
         */
        set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events are
 * either incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
                               struct rdma_cm_event *event)
{
        struct svcxprt_rdma *xprt = cma_id->context;
        int ret = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
                        "event=%d\n", cma_id, cma_id->context, event->event);
                handle_connect_req(cma_id);
                break;

        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                break;

        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt)
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                break;

        default:
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
                        "event=%d\n", cma_id, event->event);
                break;
        }

        return ret;
}

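/*
 * CM event handler for connected (non-listening) endpoints.  It is
 * installed on the new cm_id by svc_rdma_accept() once the connection
 * has been set up, and drives transport close on disconnect or device
 * removal.
 */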
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                            struct rdma_cm_event *event)
{
        struct svc_xprt *xprt = cma_id->context;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                svc_xprt_get(xprt);
                dprintk("svcrdma: Connection completed on DTO xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
                svc_xprt_enqueue(xprt);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
                        "event=%d\n", cma_id, xprt, event->event);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                }
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
                        "event=%d\n", cma_id, event->event);
                break;
        }
        return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct sockaddr *sa, int salen,
                                        int flags)
{
        struct rdma_cm_id *listen_id;
        struct svcxprt_rdma *cma_xprt;
        struct svc_xprt *xprt;
        int ret;

        dprintk("svcrdma: Creating RDMA socket\n");

        cma_xprt = rdma_create_xprt(serv, 1);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;

        listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
                goto err0;
        }

        ret = rdma_bind_addr(listen_id, sa);
        if (ret) {
                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
                goto err1;
        }
        cma_xprt->sc_cm_id = listen_id;

        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
        if (ret) {
                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
                goto err1;
        }

        /*
         * We need to use the address from the cm_id in case the
         * caller specified 0 for the port number.
         */
        sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

        return &cma_xprt->sc_xprt;

 err1:
        rdma_destroy_id(listen_id);
 err0:
        kfree(cma_xprt);
        return ERR_PTR(ret);
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *listen_rdma;
        struct svcxprt_rdma *newxprt = NULL;
        struct rdma_conn_param conn_param;
        struct ib_qp_init_attr qp_attr;
        struct ib_device_attr devattr;
        int ret;
        int i;

        listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
        clear_bit(XPT_CONN, &xprt->xpt_flags);
        /* Get the next entry off the accept list */
        spin_lock_bh(&listen_rdma->sc_lock);
        if (!list_empty(&listen_rdma->sc_accept_q)) {
                newxprt = list_entry(listen_rdma->sc_accept_q.next,
                                     struct svcxprt_rdma, sc_accept_q);
                list_del_init(&newxprt->sc_accept_q);
        }
        if (!list_empty(&listen_rdma->sc_accept_q))
                set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
        spin_unlock_bh(&listen_rdma->sc_lock);
        if (!newxprt)
                return NULL;

        dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
                newxprt, newxprt->sc_cm_id);

        ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
        if (ret) {
                dprintk("svcrdma: could not query device attributes on "
                        "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
                goto errout;
        }

        /* Qualify the transport resource defaults with the
         * capabilities of this particular device */
        newxprt->sc_max_sge = min((size_t)devattr.max_sge,
                                  (size_t)RPCSVC_MAXPAGES);
        newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
                                       (size_t)svcrdma_max_requests);
        newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

        newxprt->sc_ord = min((size_t)devattr.max_qp_rd_atom,
                              (size_t)svcrdma_ord);

        newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
        if (IS_ERR(newxprt->sc_pd)) {
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
        newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         sq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         newxprt->sc_sq_depth,
                                         0);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
        newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         rq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         newxprt->sc_max_requests,
                                         0);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
        }

        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.event_handler = qp_event_handler;
        qp_attr.qp_context = &newxprt->sc_xprt;
        qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
        qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
        qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
        qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.send_cq = newxprt->sc_sq_cq;
        qp_attr.recv_cq = newxprt->sc_rq_cq;
        dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
                "    cm_id->device=%p, sc_pd->device=%p\n"
                "    cap.max_send_wr = %d\n"
                "    cap.max_recv_wr = %d\n"
                "    cap.max_send_sge = %d\n"
                "    cap.max_recv_sge = %d\n",
                newxprt->sc_cm_id, newxprt->sc_pd,
                newxprt->sc_cm_id->device, newxprt->sc_pd->device,
                qp_attr.cap.max_send_wr,
                qp_attr.cap.max_recv_wr,
                qp_attr.cap.max_send_sge,
                qp_attr.cap.max_recv_sge);

        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
                /*
                 * XXX: This is a hack. We need a xx_request_qp interface
                 * that will adjust the qp_attr's with a best-effort
                 * number
                 */
                qp_attr.cap.max_send_sge -= 2;
                qp_attr.cap.max_recv_sge -= 2;
                ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
                                     &qp_attr);
                if (ret) {
                        dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
                        goto errout;
                }
                newxprt->sc_max_sge = qp_attr.cap.max_send_sge;
                newxprt->sc_max_sge = qp_attr.cap.max_recv_sge;
                newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
                newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
        }
        newxprt->sc_qp = newxprt->sc_cm_id->qp;

        /* Register all of physical memory */
        newxprt->sc_phys_mr = ib_get_dma_mr(newxprt->sc_pd,
                                            IB_ACCESS_LOCAL_WRITE |
                                            IB_ACCESS_REMOTE_WRITE);
        if (IS_ERR(newxprt->sc_phys_mr)) {
                dprintk("svcrdma: Failed to create DMA MR ret=%d\n", ret);
                goto errout;
        }

        /* Post receive buffers */
        for (i = 0; i < newxprt->sc_max_requests; i++) {
                ret = svc_rdma_post_recv(newxprt);
                if (ret) {
                        dprintk("svcrdma: failure posting receive buffers\n");
                        goto errout;
                }
        }

        /* Swap out the handler */
        newxprt->sc_cm_id->event_handler = rdma_cma_handler;

        /*
         * Arm the CQs for the SQ and RQ before accepting so we can't
         * miss the first message
         */
        ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

        /* Accept Connection */
        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = 0;
        conn_param.initiator_depth = newxprt->sc_ord;
        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
        if (ret) {
                dprintk("svcrdma: failed to accept new connection, ret=%d\n",
                        ret);
                goto errout;
        }

        dprintk("svcrdma: new connection %p accepted with the following "
                "attributes:\n"
                "    local_ip     : %d.%d.%d.%d\n"
                "    local_port   : %d\n"
                "    remote_ip    : %d.%d.%d.%d\n"
                "    remote_port  : %d\n"
                "    max_sge      : %d\n"
                "    sq_depth     : %d\n"
                "    max_requests : %d\n"
                "    ord          : %d\n",
                newxprt,
                NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
                         route.addr.src_addr)->sin_addr.s_addr),
                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
                       route.addr.src_addr)->sin_port),
                NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
                         route.addr.dst_addr)->sin_addr.s_addr),
                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
                       route.addr.dst_addr)->sin_port),
                newxprt->sc_max_sge,
                newxprt->sc_sq_depth,
                newxprt->sc_max_requests,
                newxprt->sc_ord);

        return &newxprt->sc_xprt;

 errout:
        dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
        /* Take a reference in case the DTO handler runs */
        svc_xprt_get(&newxprt->sc_xprt);
        if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
                ib_destroy_qp(newxprt->sc_qp);
        rdma_destroy_id(newxprt->sc_cm_id);
        /* This call to put will destroy the transport */
        svc_xprt_put(&newxprt->sc_xprt);
        return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        dprintk("svc: svc_rdma_detach(%p)\n", xprt);

        /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);
}

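/*
 * Final transport teardown, deferred to a workqueue by svc_rdma_free()
 * (likely so the verbs teardown runs in process context).  The last
 * xprt reference has been dropped by the time this runs; queued but
 * unprocessed read and recv completions are drained before the QP,
 * CQs, MR, PD and cm_id are destroyed.
 */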
static void __svc_rdma_free(struct work_struct *work)
{
        struct svcxprt_rdma *rdma =
                container_of(work, struct svcxprt_rdma, sc_work);
        dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

        /* We should only be called from kref_put */
        BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

        /*
         * Destroy queued, but not processed read completions. Note
         * that this cleanup has to be done before destroying the
         * cm_id because the device ptr is needed to unmap the dma in
         * svc_rdma_put_context.
         */
        spin_lock_bh(&rdma->sc_read_complete_lock);
        while (!list_empty(&rdma->sc_read_complete_q)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_entry(rdma->sc_read_complete_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                svc_rdma_put_context(ctxt, 1);
        }
        spin_unlock_bh(&rdma->sc_read_complete_lock);

        /* Destroy queued, but not processed recv completions */
        spin_lock_bh(&rdma->sc_rq_dto_lock);
        while (!list_empty(&rdma->sc_rq_dto_q)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_entry(rdma->sc_rq_dto_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                svc_rdma_put_context(ctxt, 1);
        }
        spin_unlock_bh(&rdma->sc_rq_dto_lock);

        /* Warn if we leaked a resource or under-referenced */
        WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);

        /* Destroy the QP if present (not a listener) */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_destroy_qp(rdma->sc_qp);

        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
                ib_destroy_cq(rdma->sc_sq_cq);

        if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
                ib_destroy_cq(rdma->sc_rq_cq);

        if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
                ib_dereg_mr(rdma->sc_phys_mr);

        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);

        /* Destroy the CM ID */
        rdma_destroy_id(rdma->sc_cm_id);

        destroy_context_cache(rdma);
        kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        INIT_WORK(&rdma->sc_work, __svc_rdma_free);
        schedule_work(&rdma->sc_work);
}

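/*
 * Report whether there is room on the SQ for a reply.  Returns 0 if
 * fewer than three SQ WRs are free or other senders are already
 * waiting, 1 otherwise.
 */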
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /*
         * If there are fewer SQ WR available than required to send a
         * simple response, return false.
         */
        if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3))
                return 0;

        /*
         * ...or there are already waiters on the SQ,
         * return false.
         */
        if (waitqueue_active(&rdma->sc_send_wait))
                return 0;

        /* Otherwise return true. */
        return 1;
}

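/*
 * Post a send WR, throttling against the SQ depth.  If the SQ is full
 * the caller first tries to reap completed WRs and then sleeps on
 * sc_send_wait until space frees up.  A transport reference is taken
 * for every successfully posted WR and released when its completion
 * is reaped in sq_cq_reap().
 */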
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
        struct ib_send_wr *bad_wr;
        int ret;

        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                return -ENOTCONN;

        BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
        BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
               wr->opcode);
        /* If the SQ is full, wait until an SQ entry is available */
        while (1) {
                spin_lock_bh(&xprt->sc_lock);
                if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
                        spin_unlock_bh(&xprt->sc_lock);
                        atomic_inc(&rdma_stat_sq_starve);

                        /* See if we can opportunistically reap SQ WR to make room */
                        sq_cq_reap(xprt);

                        /* Wait until SQ WR available if SQ still full */
                        wait_event(xprt->sc_send_wait,
                                   atomic_read(&xprt->sc_sq_count) <
                                   xprt->sc_sq_depth);
                        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                                return 0;
                        continue;
                }
                /* Bumped used SQ WR count and post */
                svc_xprt_get(&xprt->sc_xprt);
                ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
                if (!ret)
                        atomic_inc(&xprt->sc_sq_count);
                else {
                        svc_xprt_put(&xprt->sc_xprt);
                        dprintk("svcrdma: failed to post SQ WR rc=%d, "
                                "sc_sq_count=%d, sc_sq_depth=%d\n",
                                ret, atomic_read(&xprt->sc_sq_count),
                                xprt->sc_sq_depth);
                }
                spin_unlock_bh(&xprt->sc_lock);
                break;
        }
        return ret;
}

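/*
 * Send an RPC/RDMA protocol error back to the client.  The error is
 * XDR-encoded into a freshly allocated page that is attached to a
 * context and pushed out with a signaled SEND WR via svc_rdma_send().
 */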
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                         enum rpcrdma_errcode err)
{
        struct ib_send_wr err_wr;
        struct ib_sge sge;
        struct page *p;
        struct svc_rdma_op_ctxt *ctxt;
        u32 *va;
        int length;
        int ret;

        p = svc_rdma_get_page();
        va = page_address(p);

        /* XDR encode error */
        length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

        /* Prepare SGE for local address; the page is the source of the SEND */
        sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
                                   p, 0, PAGE_SIZE, DMA_TO_DEVICE);
        sge.lkey = xprt->sc_phys_mr->lkey;
        sge.length = length;

        ctxt = svc_rdma_get_context(xprt);
        ctxt->count = 1;
        ctxt->pages[0] = p;

        /* Prepare SEND WR */
        memset(&err_wr, 0, sizeof err_wr);
        ctxt->wr_op = IB_WR_SEND;
        err_wr.wr_id = (unsigned long)ctxt;
        err_wr.sg_list = &sge;
        err_wr.num_sge = 1;
        err_wr.opcode = IB_WR_SEND;
        err_wr.send_flags = IB_SEND_SIGNALED;

        /* Post It */
        ret = svc_rdma_send(xprt, &err_wr);
        if (ret) {
                dprintk("svcrdma: Error %d posting send for protocol error\n",
                        ret);
                svc_rdma_put_context(ctxt, 1);
        }
}