/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct sockaddr *sa, int salen,
                                        int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create = svc_rdma_create,
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_release_rqst = svc_rdma_release_rqst,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_has_wspace = svc_rdma_has_wspace,
        .xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
        .xcl_name = "rdma",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

/* WR context cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_ctxt_cachep;
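
/*
 * Allocate a per-WR context from the global context cache. Sleeps and
 * retries every 500ms until the allocation succeeds, so this may only
 * be called from process context.
 */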
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt;

        while (1) {
                ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
                if (ctxt)
                        break;
                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
        }
        ctxt->xprt = xprt;
        INIT_LIST_HEAD(&ctxt->dto_q);
        ctxt->count = 0;
        ctxt->frmr = NULL;
        atomic_inc(&xprt->sc_ctxt_used);
        return ctxt;
}
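
/*
 * Unmap the DMA mappings held in a context's SGE list. SGEs registered
 * through an FRMR (lkey != sc_dma_lkey) are skipped here; they are
 * unmapped when the FRMR itself is released.
 */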
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
        struct svcxprt_rdma *xprt = ctxt->xprt;
        int i;
        for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
                /*
                 * Unmap the DMA addr in the SGE if the lkey matches
                 * the sc_dma_lkey, otherwise, ignore it since it is
                 * an FRMR lkey and will be unmapped later when the
                 * last WR that uses it completes.
                 */
                if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
                        atomic_dec(&xprt->sc_dma_used);
                        ib_dma_unmap_single(xprt->sc_cm_id->device,
                                            ctxt->sge[i].addr,
                                            ctxt->sge[i].length,
                                            ctxt->direction);
                }
        }
}
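
/*
 * Return a context to the cache. If free_pages is set, the pages the
 * context references are released as well.
 */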
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
        struct svcxprt_rdma *xprt;
        int i;

        BUG_ON(!ctxt);
        xprt = ctxt->xprt;
        if (free_pages)
                for (i = 0; i < ctxt->count; i++)
                        put_page(ctxt->pages[i]);

        kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
        atomic_dec(&xprt->sc_ctxt_used);
}

/* Temporary NFS request map cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_map_cachep;

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
        struct svc_rdma_req_map *map;
        while (1) {
                map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
                if (map)
                        break;
                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
        }
        map->count = 0;
        map->frmr = NULL;
        return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
        kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;
        dprintk("svcrdma: received CQ event id=%d, context=%p\n",
                event->event, context);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        switch (event->event) {
        /* These are considered benign events */
        case IB_EVENT_PATH_MIG:
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
                dprintk("svcrdma: QP event %d received for QP=%p\n",
                        event->event, event->element.qp);
                break;
        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
        case IB_EVENT_QP_FATAL:
        case IB_EVENT_QP_REQ_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
                dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
                        "closing transport\n",
                        event->event, event->element.qp);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                break;
        }
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
        struct svcxprt_rdma *xprt;
        unsigned long flags;

        spin_lock_irqsave(&dto_lock, flags);
        while (!list_empty(&dto_xprt_q)) {
                xprt = list_entry(dto_xprt_q.next,
                                  struct svcxprt_rdma, sc_dto_q);
                list_del_init(&xprt->sc_dto_q);
                spin_unlock_irqrestore(&dto_lock, flags);

                rq_cq_reap(xprt);
                sq_cq_reap(xprt);

                svc_xprt_put(&xprt->sc_xprt);
                spin_lock_irqsave(&dto_lock, flags);
        }
        spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
                return;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an SQ
         * completion.
         */
        set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
        int ret;
        struct ib_wc wc;
        struct svc_rdma_op_ctxt *ctxt = NULL;

        if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_rq_poll);

        while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
                ctxt->wc_status = wc.status;
                ctxt->byte_len = wc.byte_len;
                svc_rdma_unmap_dma(ctxt);
                if (wc.status != IB_WC_SUCCESS) {
                        /* Close the transport */
                        dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        svc_rdma_put_context(ctxt, 1);
                        svc_xprt_put(&xprt->sc_xprt);
                        continue;
                }
                spin_lock_bh(&xprt->sc_rq_dto_lock);
                list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
                spin_unlock_bh(&xprt->sc_rq_dto_lock);
                svc_xprt_put(&xprt->sc_xprt);
        }

        if (ctxt)
                atomic_inc(&rdma_stat_rq_prod);

        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        /*
         * If data arrived before established event,
         * don't enqueue. This defers RPC I/O until the
         * RDMA connection is complete.
         */
        if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
                svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
                            struct svc_rdma_op_ctxt *ctxt)
{
        svc_rdma_unmap_dma(ctxt);

        switch (ctxt->wr_op) {
        case IB_WR_SEND:
                if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
                        svc_rdma_put_frmr(xprt, ctxt->frmr);
                svc_rdma_put_context(ctxt, 1);
                break;

        case IB_WR_RDMA_WRITE:
                svc_rdma_put_context(ctxt, 0);
                break;

        case IB_WR_RDMA_READ:
        case IB_WR_RDMA_READ_WITH_INV:
                if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
                        struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
                        BUG_ON(!read_hdr);
                        if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
                                svc_rdma_put_frmr(xprt, ctxt->frmr);
                        spin_lock_bh(&xprt->sc_rq_dto_lock);
                        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
                        list_add_tail(&read_hdr->dto_q,
                                      &xprt->sc_read_complete_q);
                        spin_unlock_bh(&xprt->sc_rq_dto_lock);
                        svc_xprt_enqueue(&xprt->sc_xprt);
                }
                svc_rdma_put_context(ctxt, 0);
                break;

        default:
                printk(KERN_ERR "svcrdma: unexpected completion type, "
                       "opcode=%d\n",
                       ctxt->wr_op);
                break;
        }
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct ib_wc wc;
        struct ib_cq *cq = xprt->sc_sq_cq;
        int ret;

        if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_sq_poll);
        while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
                if (wc.status != IB_WC_SUCCESS)
                        /* Close the transport */
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

                /* Decrement used SQ WR count */
                atomic_dec(&xprt->sc_sq_count);
                wake_up(&xprt->sc_send_wait);

                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
                if (ctxt)
                        process_context(xprt, ctxt);

                svc_xprt_put(&xprt->sc_xprt);
        }

        if (ctxt)
                atomic_inc(&rdma_stat_sq_prod);
}
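
/*
 * SQ interrupt handler. Mirrors rq_comp_handler above: mark the SQ as
 * pending, queue the transport on the DTO list if it is not already
 * there, and kick the tasklet.
 */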
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
                return;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an RQ
         * completion.
         */
        set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}
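
/*
 * Allocate and initialize an svcxprt_rdma. The listener flag marks the
 * transport as a listening endpoint rather than a connected one.
 */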
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
                                             int listener)
{
        struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

        if (!cma_xprt)
                return NULL;
        svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
        INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
        INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
        init_waitqueue_head(&cma_xprt->sc_send_wait);

        spin_lock_init(&cma_xprt->sc_lock);
        spin_lock_init(&cma_xprt->sc_rq_dto_lock);
        spin_lock_init(&cma_xprt->sc_frmr_q_lock);

        cma_xprt->sc_ord = svcrdma_ord;

        cma_xprt->sc_max_req_size = svcrdma_max_req_size;
        cma_xprt->sc_max_requests = svcrdma_max_requests;
        cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
        atomic_set(&cma_xprt->sc_sq_count, 0);
        atomic_set(&cma_xprt->sc_ctxt_used, 0);

        if (listener)
                set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

        return cma_xprt;
}
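
/* Allocate a page for a receive buffer, retrying until one is available. */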
struct page *svc_rdma_get_page(void)
{
        struct page *page;

        while ((page = alloc_page(GFP_KERNEL)) == NULL) {
                /* If we can't get memory, wait a bit and try again */
                printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 "
                       "jiffies.\n");
                schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
        }
        return page;
}
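
/*
 * Build and post a single receive WR large enough for the maximum
 * request size. A transport reference is taken before posting; it is
 * dropped on posting failure, or by rq_cq_reap when the receive
 * completes.
 */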
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *page;
        dma_addr_t pa;
        int sge_no;
        int buflen;
        int ret;

        ctxt = svc_rdma_get_context(xprt);
        buflen = 0;
        ctxt->direction = DMA_FROM_DEVICE;
        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
                BUG_ON(sge_no >= xprt->sc_max_sge);
                page = svc_rdma_get_page();
                ctxt->pages[sge_no] = page;
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
                        goto err_put_ctxt;
                atomic_inc(&xprt->sc_dma_used);
                ctxt->sge[sge_no].addr = pa;
                ctxt->sge[sge_no].length = PAGE_SIZE;
                ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
                buflen += PAGE_SIZE;
        }
        ctxt->count = sge_no;
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
        recv_wr.wr_id = (u64)(unsigned long)ctxt;

        svc_xprt_get(&xprt->sc_xprt);
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
        if (ret) {
                svc_xprt_put(&xprt->sc_xprt);
                svc_rdma_put_context(ctxt, 1);
        }
        return ret;

 err_put_ctxt:
        svc_rdma_put_context(ctxt, 1);
        return -ENOMEM;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
        struct svcxprt_rdma *newxprt;
        struct sockaddr *sa;

        /* Create a new transport */
        newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
        if (!newxprt) {
                dprintk("svcrdma: failed to create new transport\n");
                return;
        }
        newxprt->sc_cm_id = new_cma_id;
        new_cma_id->context = newxprt;
        dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
                newxprt, newxprt->sc_cm_id, listen_xprt);

        /* Save client advertised inbound read limit for use later in accept. */
        newxprt->sc_ord = client_ird;

        /* Set the local and remote addresses in the transport */
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

        /*
         * Enqueue the new transport on the accept queue of the listening
         * transport
         */
        spin_lock_bh(&listen_xprt->sc_lock);
        list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
        spin_unlock_bh(&listen_xprt->sc_lock);

        /*
         * Can't use svc_xprt_received here because we are not on a
         * rqstp thread
         */
        set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
                               struct rdma_cm_event *event)
{
        struct svcxprt_rdma *xprt = cma_id->context;
        int ret = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
                        "event=%d\n", cma_id, cma_id->context, event->event);
                handle_connect_req(cma_id,
                                   event->param.conn.initiator_depth);
                break;

        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                break;

        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt)
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                break;

        default:
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
                        "event=%d\n", cma_id, event->event);
                break;
        }

        return ret;
}
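
/*
 * Handles events generated on a connected endpoint: completion of the
 * accept, disconnect by the peer, and device removal.
 */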
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                            struct rdma_cm_event *event)
{
        struct svc_xprt *xprt = cma_id->context;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                svc_xprt_get(xprt);
                dprintk("svcrdma: Connection completed on DTO xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
                svc_xprt_enqueue(xprt);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
                        "event=%d\n", cma_id, xprt, event->event);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                }
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
                        "event=%d\n", cma_id, event->event);
                break;
        }
        return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct sockaddr *sa, int salen,
                                        int flags)
{
        struct rdma_cm_id *listen_id;
        struct svcxprt_rdma *cma_xprt;
        struct svc_xprt *xprt;
        int ret;

        dprintk("svcrdma: Creating RDMA socket\n");

        cma_xprt = rdma_create_xprt(serv, 1);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;

        listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
                goto err0;
        }

        ret = rdma_bind_addr(listen_id, sa);
        if (ret) {
                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
                goto err1;
        }
        cma_xprt->sc_cm_id = listen_id;

        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
        if (ret) {
                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
                goto err1;
        }

        /*
         * We need to use the address from the cm_id in case the
         * caller specified 0 for the port number.
         */
        sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

        return &cma_xprt->sc_xprt;

 err1:
        rdma_destroy_id(listen_id);
 err0:
        kfree(cma_xprt);
        return ERR_PTR(ret);
}
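
/*
 * Allocate a fast-register MR (FRMR) and its page list, sized for the
 * largest request the server handles.
 */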
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
        struct ib_mr *mr;
        struct ib_fast_reg_page_list *pl;
        struct svc_rdma_fastreg_mr *frmr;

        frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
        if (!frmr)
                goto err;

        /* ib_alloc_fast_reg_mr() returns an ERR_PTR, never NULL */
        mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
        if (IS_ERR(mr))
                goto err_free_frmr;

        pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
                                         RPCSVC_MAXPAGES);
        if (IS_ERR(pl))
                goto err_free_mr;

        frmr->mr = mr;
        frmr->page_list = pl;
        INIT_LIST_HEAD(&frmr->frmr_list);
        return frmr;

 err_free_mr:
        ib_dereg_mr(mr);
 err_free_frmr:
        kfree(frmr);
 err:
        return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_fastreg_mr *frmr;

        while (!list_empty(&xprt->sc_frmr_q)) {
                frmr = list_entry(xprt->sc_frmr_q.next,
                                  struct svc_rdma_fastreg_mr, frmr_list);
                list_del_init(&frmr->frmr_list);
                ib_dereg_mr(frmr->mr);
                ib_free_fast_reg_page_list(frmr->page_list);
                kfree(frmr);
        }
}
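
/*
 * Get an FRMR from the transport's free list, or allocate a fresh one
 * if the list is empty.
 */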
struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_fastreg_mr *frmr = NULL;

        spin_lock_bh(&rdma->sc_frmr_q_lock);
        if (!list_empty(&rdma->sc_frmr_q)) {
                frmr = list_entry(rdma->sc_frmr_q.next,
                                  struct svc_rdma_fastreg_mr, frmr_list);
                list_del_init(&frmr->frmr_list);
                frmr->map_len = 0;
                frmr->page_list_len = 0;
        }
        spin_unlock_bh(&rdma->sc_frmr_q_lock);
        if (frmr)
                return frmr;

        return rdma_alloc_frmr(rdma);
}
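
/* Unmap any DMA addresses still held in the FRMR's page list. */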
static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
                           struct svc_rdma_fastreg_mr *frmr)
{
        int page_no;
        for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
                dma_addr_t addr = frmr->page_list->page_list[page_no];
                if (ib_dma_mapping_error(frmr->mr->device, addr))
                        continue;
                atomic_dec(&xprt->sc_dma_used);
                ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE,
                                    frmr->direction);
        }
}
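
/* Return an FRMR to the transport's free list after unmapping its pages. */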
void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
                       struct svc_rdma_fastreg_mr *frmr)
{
        if (frmr) {
                frmr_unmap_dma(rdma, frmr);
                spin_lock_bh(&rdma->sc_frmr_q_lock);
                BUG_ON(!list_empty(&frmr->frmr_list));
                list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
                spin_unlock_bh(&rdma->sc_frmr_q_lock);
        }
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *listen_rdma;
        struct svcxprt_rdma *newxprt = NULL;
        struct rdma_conn_param conn_param;
        struct ib_qp_init_attr qp_attr;
        struct ib_device_attr devattr;
        int uninitialized_var(dma_mr_acc);
        int need_dma_mr;
        int ret;
        int i;

        listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
        clear_bit(XPT_CONN, &xprt->xpt_flags);
        /* Get the next entry off the accept list */
        spin_lock_bh(&listen_rdma->sc_lock);
        if (!list_empty(&listen_rdma->sc_accept_q)) {
                newxprt = list_entry(listen_rdma->sc_accept_q.next,
                                     struct svcxprt_rdma, sc_accept_q);
                list_del_init(&newxprt->sc_accept_q);
        }
        if (!list_empty(&listen_rdma->sc_accept_q))
                set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
        spin_unlock_bh(&listen_rdma->sc_lock);
        if (!newxprt)
                return NULL;

        dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
                newxprt, newxprt->sc_cm_id);

        ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
        if (ret) {
                dprintk("svcrdma: could not query device attributes on "
                        "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
                goto errout;
        }

        /* Qualify the transport resource defaults with the
         * capabilities of this particular device */
        newxprt->sc_max_sge = min((size_t)devattr.max_sge,
                                  (size_t)RPCSVC_MAXPAGES);
        newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
                                       (size_t)svcrdma_max_requests);
        newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

        /*
         * Limit ORD based on client limit, local device limit, and
         * configured svcrdma limit.
         */
        newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
        newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

        newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
        if (IS_ERR(newxprt->sc_pd)) {
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
        newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         sq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         newxprt->sc_sq_depth,
                                         0);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
        newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         rq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         newxprt->sc_max_requests,
                                         0);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
        }

        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.event_handler = qp_event_handler;
        qp_attr.qp_context = &newxprt->sc_xprt;
        qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
        qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
        qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
        qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.send_cq = newxprt->sc_sq_cq;
        qp_attr.recv_cq = newxprt->sc_rq_cq;
        dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
                "    cm_id->device=%p, sc_pd->device=%p\n"
                "    cap.max_send_wr = %d\n"
                "    cap.max_recv_wr = %d\n"
                "    cap.max_send_sge = %d\n"
                "    cap.max_recv_sge = %d\n",
                newxprt->sc_cm_id, newxprt->sc_pd,
                newxprt->sc_cm_id->device, newxprt->sc_pd->device,
                qp_attr.cap.max_send_wr,
                qp_attr.cap.max_recv_wr,
                qp_attr.cap.max_send_sge,
                qp_attr.cap.max_recv_sge);

        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
                /*
                 * XXX: This is a hack. We need a xx_request_qp interface
                 * that will adjust the qp_attr's with a best-effort
                 * number
                 */
                qp_attr.cap.max_send_sge -= 2;
                qp_attr.cap.max_recv_sge -= 2;
                ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
                                     &qp_attr);
                if (ret) {
                        dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
                        goto errout;
                }
                newxprt->sc_max_sge = min(qp_attr.cap.max_send_sge,
                                          qp_attr.cap.max_recv_sge);
                newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
                newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
        }
        newxprt->sc_qp = newxprt->sc_cm_id->qp;

        /*
         * Use the most secure set of MR resources based on the
         * transport type and available memory management features in
         * the device. Here's the table implemented below:
         *
         *              Fast    Global  DMA     Remote WR
         *              Reg     LKEY    MR      Access
         *              Sup'd   Sup'd   Needed  Needed
         *
         * IWARP        N       N       Y       Y
         *              N       Y       Y       Y
         *              Y       N       Y       N
         *              Y       Y       N       -
         *
         * IB           N       N       Y       N
         *              N       Y       N       -
         *              Y       N       Y       N
         *              Y       Y       N       -
         *
         * NB: iWARP requires remote write access for the data sink
         *     of an RDMA_READ. IB does not.
         */
        if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                newxprt->sc_frmr_pg_list_len =
                        devattr.max_fast_reg_page_list_len;
                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
        }

        /*
         * Determine if a DMA MR is required and if so, what privs are required
         */
        switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
        case RDMA_TRANSPORT_IWARP:
                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
                if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
                        need_dma_mr = 1;
                        dma_mr_acc =
                                (IB_ACCESS_LOCAL_WRITE |
                                 IB_ACCESS_REMOTE_WRITE);
                } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
                        need_dma_mr = 1;
                        dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
                } else
                        need_dma_mr = 0;
                break;
        case RDMA_TRANSPORT_IB:
                if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
                        need_dma_mr = 1;
                        dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
                } else
                        need_dma_mr = 0;
                break;
        default:
                goto errout;
        }

        /* Create the DMA MR if needed, otherwise, use the DMA LKEY */
        if (need_dma_mr) {
                /* Register all of physical memory */
                newxprt->sc_phys_mr =
                        ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
                if (IS_ERR(newxprt->sc_phys_mr)) {
                        dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
                                ret);
                        goto errout;
                }
                newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
        } else
                newxprt->sc_dma_lkey =
                        newxprt->sc_cm_id->device->local_dma_lkey;

        /* Post receive buffers */
        for (i = 0; i < newxprt->sc_max_requests; i++) {
                ret = svc_rdma_post_recv(newxprt);
                if (ret) {
                        dprintk("svcrdma: failure posting receive buffers\n");
                        goto errout;
                }
        }

        /* Swap out the handler */
        newxprt->sc_cm_id->event_handler = rdma_cma_handler;

        /*
         * Arm the CQs for the SQ and RQ before accepting so we can't
         * miss the first message
         */
        ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

        /* Accept Connection */
        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = 0;
        conn_param.initiator_depth = newxprt->sc_ord;
        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
        if (ret) {
                dprintk("svcrdma: failed to accept new connection, ret=%d\n",
                        ret);
                goto errout;
        }

        dprintk("svcrdma: new connection %p accepted with the following "
                "attributes:\n"
                "    local_ip     : %pI4\n"
                "    local_port   : %d\n"
                "    remote_ip    : %pI4\n"
                "    remote_port  : %d\n"
                "    max_sge      : %d\n"
                "    sq_depth     : %d\n"
                "    max_requests : %d\n"
                "    ord          : %d\n",
                newxprt,
                &((struct sockaddr_in *)&newxprt->sc_cm_id->
                  route.addr.src_addr)->sin_addr.s_addr,
                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
                       route.addr.src_addr)->sin_port),
                &((struct sockaddr_in *)&newxprt->sc_cm_id->
                  route.addr.dst_addr)->sin_addr.s_addr,
                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
                       route.addr.dst_addr)->sin_port),
                newxprt->sc_max_sge,
                newxprt->sc_sq_depth,
                newxprt->sc_max_requests,
                newxprt->sc_ord);

        return &newxprt->sc_xprt;

 errout:
        dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
        /* Take a reference in case the DTO handler runs */
        svc_xprt_get(&newxprt->sc_xprt);
        if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
                ib_destroy_qp(newxprt->sc_qp);
        rdma_destroy_id(newxprt->sc_cm_id);
        /* This call to put will destroy the transport */
        svc_xprt_put(&newxprt->sc_xprt);
        return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        dprintk("svc: svc_rdma_detach(%p)\n", xprt);

        /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);
}
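
/*
 * Tear down all transport resources. This runs from a workqueue
 * (scheduled by svc_rdma_free below) so the teardown happens in process
 * context; the last transport reference may be put from the DTO
 * tasklet, where destroying the QP, CQs, and CM ID could not sleep.
 */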
static void __svc_rdma_free(struct work_struct *work)
{
        struct svcxprt_rdma *rdma =
                container_of(work, struct svcxprt_rdma, sc_work);
        dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

        /* We should only be called from kref_put */
        BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

        /*
         * Destroy queued, but not processed read completions. Note
         * that this cleanup has to be done before destroying the
         * cm_id because the device ptr is needed to unmap the dma in
         * svc_rdma_put_context.
         */
        while (!list_empty(&rdma->sc_read_complete_q)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_entry(rdma->sc_read_complete_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                svc_rdma_put_context(ctxt, 1);
        }

        /* Destroy queued, but not processed recv completions */
        while (!list_empty(&rdma->sc_rq_dto_q)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_entry(rdma->sc_rq_dto_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                svc_rdma_put_context(ctxt, 1);
        }

        /* Warn if we leaked a resource or under-referenced */
        WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
        WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);

        /* De-allocate fastreg mr */
        rdma_dealloc_frmr_q(rdma);

        /* Destroy the QP if present (not a listener) */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_destroy_qp(rdma->sc_qp);

        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
                ib_destroy_cq(rdma->sc_sq_cq);

        if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
                ib_destroy_cq(rdma->sc_rq_cq);

        if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
                ib_dereg_mr(rdma->sc_phys_mr);

        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);

        /* Destroy the CM ID */
        rdma_destroy_id(rdma->sc_cm_id);

        kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        INIT_WORK(&rdma->sc_work, __svc_rdma_free);
        schedule_work(&rdma->sc_work);
}
| 1183 | |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1184 | static int svc_rdma_has_wspace(struct svc_xprt *xprt) |
| 1185 | { |
| 1186 | struct svcxprt_rdma *rdma = |
| 1187 | container_of(xprt, struct svcxprt_rdma, sc_xprt); |
| 1188 | |
| 1189 | /* |
| 1190 | * If there are fewer SQ WRs available than required to send a |
| 1191 | * simple response, return false. |
| 1192 | */ |
| 1193 | if (rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3) |
| 1194 | return 0; |
| 1195 | |
| 1196 | /* |
| 1197 | * If there are already waiters on the SQ, |
| 1198 | * return false. |
| 1199 | */ |
| 1200 | if (waitqueue_active(&rdma->sc_send_wait)) |
| 1201 | return 0; |
| 1202 | |
| 1203 | /* Otherwise return true. */ |
| 1204 | return 1; |
| 1205 | } |
| 1206 | |
Tom Tucker | e118321 | 2008-10-03 15:22:18 -0500 | [diff] [blame] | 1207 | /* |
| 1208 | * Attempt to register the kvec representing the RPC memory with the |
| 1209 | * device by posting a FAST_REG WR. |
| 1210 | * |
| 1211 | * Returns: |
| 1212 | * 0 : The FAST_REG WR was successfully posted; registration |
| 1213 | * completes asynchronously on the send CQ. |
| 1214 | * <0 : An error was encountered attempting to post the WR |
| 1215 | * (e.g. -ENOTCONN if the transport is closing). |
| 1216 | */ |
| 1217 | int svc_rdma_fastreg(struct svcxprt_rdma *xprt, |
| 1218 | struct svc_rdma_fastreg_mr *frmr) |
| 1219 | { |
| 1220 | struct ib_send_wr fastreg_wr; |
| 1221 | u8 key; |
| 1222 | |
| 1223 | /* Bump the key so the new registration's R_Key differs |
| | * from any stale key previously advertised for this MR. |
| | */ |
| 1224 | key = (u8)(frmr->mr->lkey & 0x000000FF); |
| 1225 | ib_update_fast_reg_key(frmr->mr, ++key); |
| 1226 | |
| 1227 | /* Prepare FASTREG WR */ |
| 1228 | memset(&fastreg_wr, 0, sizeof(fastreg_wr)); |
| 1229 | fastreg_wr.opcode = IB_WR_FAST_REG_MR; |
| 1230 | fastreg_wr.send_flags = IB_SEND_SIGNALED; |
| 1231 | fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva; |
| 1232 | fastreg_wr.wr.fast_reg.page_list = frmr->page_list; |
| 1233 | fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len; |
| 1234 | fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; |
| 1235 | fastreg_wr.wr.fast_reg.length = frmr->map_len; |
| 1236 | fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags; |
| 1237 | fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey; |
| 1238 | return svc_rdma_send(xprt, &fastreg_wr); |
| 1239 | } |
| 1240 | |
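| | /* |
| | * Post a chain of one or more send WRs on the transport's SQ, |
| | * sleeping until sufficient SQ space is available. A transport |
| | * reference is taken for each WR posted and released by |
| | * sq_cq_reap() when the corresponding completion arrives. |
| | * |
| | * Hypothetical caller sketch (ctxt and sge are illustrative only, |
| | * prepared as in svc_rdma_send_error() below): |
| | * |
| | *	struct ib_send_wr wr; |
| | * |
| | *	memset(&wr, 0, sizeof(wr)); |
| | *	wr.wr_id = (unsigned long)ctxt; |
| | *	wr.sg_list = &sge; |
| | *	wr.num_sge = 1; |
| | *	wr.opcode = IB_WR_SEND; |
| | *	wr.send_flags = IB_SEND_SIGNALED; |
| | *	ret = svc_rdma_send(rdma_xprt, &wr); |
| | */ |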
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1241 | int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) |
| 1242 | { |
Tom Tucker | 5b180a9 | 2008-08-11 14:10:19 -0500 | [diff] [blame] | 1243 | struct ib_send_wr *bad_wr, *n_wr; |
| 1244 | int wr_count; |
| 1245 | int i; |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1246 | int ret; |
| 1247 | |
| 1248 | if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) |
Tom Tucker | 9d6347a | 2008-04-25 15:51:27 -0500 | [diff] [blame] | 1249 | return -ENOTCONN; |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1250 | |
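| | /* |
| | * Every WR posted here must be signaled: sq_cq_reap() releases |
| | * the per-WR transport reference and SQ slot only when it sees |
| | * the completion. |
| | */ |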
| 1251 | BUG_ON(wr->send_flags != IB_SEND_SIGNALED); |
Tom Tucker | 5b180a9 | 2008-08-11 14:10:19 -0500 | [diff] [blame] | 1252 | wr_count = 1; |
| 1253 | for (n_wr = wr->next; n_wr; n_wr = n_wr->next) |
| 1254 | wr_count++; |
| 1255 | |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1256 | /* If the SQ is full, wait until an SQ entry is available */ |
| 1257 | while (1) { |
| 1258 | spin_lock_bh(&xprt->sc_lock); |
Tom Tucker | 5b180a9 | 2008-08-11 14:10:19 -0500 | [diff] [blame] | 1259 | if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1260 | spin_unlock_bh(&xprt->sc_lock); |
| 1261 | atomic_inc(&rdma_stat_sq_starve); |
Tom Tucker | dbcd00e | 2008-05-06 11:33:11 -0500 | [diff] [blame] | 1262 | |
| 1263 | /* See if we can opportunistically reap SQ WRs to make room */ |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1264 | sq_cq_reap(xprt); |
| 1265 | |
| 1266 | /* Wait until an SQ WR is available if the SQ is still full */ |
| 1267 | wait_event(xprt->sc_send_wait, |
| 1268 | atomic_read(&xprt->sc_sq_count) < |
| 1269 | xprt->sc_sq_depth); |
Tom Tucker | 830bb59 | 2008-03-11 12:44:27 -0500 | [diff] [blame] | 1270 | if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) |
| 1271 | return -ENOTCONN; |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1272 | continue; |
| 1273 | } |
Tom Tucker | 5b180a9 | 2008-08-11 14:10:19 -0500 | [diff] [blame] | 1274 | /* Take a transport ref for each WR posted */ |
| 1275 | for (i = 0; i < wr_count; i++) |
| 1276 | svc_xprt_get(&xprt->sc_xprt); |
| 1277 | |
| 1278 | /* Bump used SQ WR count and post */ |
| 1279 | atomic_add(wr_count, &xprt->sc_sq_count); |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1280 | ret = ib_post_send(xprt->sc_qp, wr, &bad_wr); |
Tom Tucker | 5b180a9 | 2008-08-11 14:10:19 -0500 | [diff] [blame] | 1281 | if (ret) { |
| 1282 | set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); |
| 1283 | atomic_sub(wr_count, &xprt->sc_sq_count); |
| 1284 | for (i = 0; i < wr_count; i++) |
| 1285 | svc_xprt_put(&xprt->sc_xprt); |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1286 | dprintk("svcrdma: failed to post SQ WR rc=%d, " |
| 1287 | "sc_sq_count=%d, sc_sq_depth=%d\n", |
| 1288 | ret, atomic_read(&xprt->sc_sq_count), |
| 1289 | xprt->sc_sq_depth); |
Tom Tucker | 0905c0f | 2008-05-01 10:49:03 -0500 | [diff] [blame] | 1290 | } |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1291 | spin_unlock_bh(&xprt->sc_lock); |
Tom Tucker | 5b180a9 | 2008-08-11 14:10:19 -0500 | [diff] [blame] | 1292 | if (ret) |
| 1293 | wake_up(&xprt->sc_send_wait); |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1294 | break; |
| 1295 | } |
| 1296 | return ret; |
| 1297 | } |
| 1298 | |
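| | /* |
| | * XDR-encode an RPC/RDMA error (e.g. ERR_VERS or ERR_CHUNK) into a |
| | * newly allocated page, DMA-map it, and post a single signaled |
| | * SEND. If the post fails, the mapping and context are released |
| | * here; otherwise they are released when the send completes. |
| | */ |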
Tom Tucker | 008fdbc | 2008-05-07 15:47:42 -0500 | [diff] [blame] | 1299 | void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, |
| 1300 | enum rpcrdma_errcode err) |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1301 | { |
| 1302 | struct ib_send_wr err_wr; |
| 1303 | struct ib_sge sge; |
| 1304 | struct page *p; |
| 1305 | struct svc_rdma_op_ctxt *ctxt; |
| 1306 | u32 *va; |
| 1307 | int length; |
| 1308 | int ret; |
| 1309 | |
| 1310 | p = svc_rdma_get_page(); |
| 1311 | va = page_address(p); |
| 1312 | |
| 1313 | /* XDR encode error */ |
| 1314 | length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); |
| 1315 | |
| 1316 | /* Prepare SGE for local address */ |
| 1317 | sge.addr = ib_dma_map_page(xprt->sc_cm_id->device, |
| 1318 | p, 0, PAGE_SIZE, DMA_TO_DEVICE); |
Tom Tucker | 04911b5 | 2008-08-11 15:14:53 -0500 | [diff] [blame] | 1319 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) { |
| 1320 | put_page(p); |
| 1321 | return; |
| 1322 | } |
| 1323 | atomic_inc(&xprt->sc_dma_used); |
| 1324 | sge.lkey = xprt->sc_dma_lkey; |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1325 | sge.length = length; |
| 1326 | |
| 1327 | ctxt = svc_rdma_get_context(xprt); |
| 1328 | ctxt->count = 1; |
| 1329 | ctxt->pages[0] = p; |
| 1330 | |
| 1331 | /* Prepare SEND WR */ |
| 1332 | memset(&err_wr, 0, sizeof(err_wr)); |
| 1333 | ctxt->wr_op = IB_WR_SEND; |
| 1334 | err_wr.wr_id = (unsigned long)ctxt; |
| 1335 | err_wr.sg_list = &sge; |
| 1336 | err_wr.num_sge = 1; |
| 1337 | err_wr.opcode = IB_WR_SEND; |
| 1338 | err_wr.send_flags = IB_SEND_SIGNALED; |
| 1339 | |
| 1340 | /* Post It */ |
| 1341 | ret = svc_rdma_send(xprt, &err_wr); |
| 1342 | if (ret) { |
Tom Tucker | 008fdbc | 2008-05-07 15:47:42 -0500 | [diff] [blame] | 1343 | dprintk("svcrdma: Error %d posting send for protocol error\n", |
| 1344 | ret); |
Tom Tucker | 04911b5 | 2008-08-11 15:14:53 -0500 | [diff] [blame] | 1345 | ib_dma_unmap_page(xprt->sc_cm_id->device, |
| 1346 | sge.addr, PAGE_SIZE, |
| 1347 | DMA_TO_DEVICE); |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1348 | svc_rdma_put_context(ctxt, 1); |
| 1349 | } |
Tom Tucker | 377f9b2 | 2007-12-12 16:13:21 -0600 | [diff] [blame] | 1350 | } |