/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap_sync marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
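
/* fr_state transitions (enum rpcrdma_frmr_state), as implemented below:
 *
 *	frwr_op_map:         FRMR_IS_INVALID -> FRMR_IS_VALID
 *	frwr_op_unmap_sync:  FRMR_IS_VALID   -> FRMR_IS_INVALID
 *	frwr_wc_fastreg:     flushed FAST_REG  -> FRMR_FLUSHED_FR
 *	frwr_wc_localinv:    flushed LOCAL_INV -> FRMR_FLUSHED_LI
 *	frwr_op_recover_mr:  any state -> FRMR_IS_INVALID
 */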

/* Transport recovery
 *
 * ->ro_map and the transport connect worker cannot run at the same
 * time, but ->ro_unmap_safe can fire while the transport connect
 * worker is running. Thus MR recovery is handled in ->ro_map, to
 * guarantee that recovered MRs are owned by a sending RPC, and not
 * by one whose ->ro_unmap_safe could fire while transport reconnect
 * is in progress.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID: The MR was not in use before the QP entered ERROR state.
 *
 * VALID: The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR: The MR was being registered when the QP entered ERROR
 *	state, and the pending WR was flushed.
 *
 * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
 *	state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
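
/* Recovery flow, end to end:
 *
 *	disconnect -> QP enters ERROR state, pending WRs flush
 *	  -> flushed completions mark MRs FLUSHED_FR / FLUSHED_LI
 *	  -> frwr_op_map pulls a stale MR off rb_mws and hands it to
 *	     rpcrdma_defer_mr_recovery
 *	  -> the recovery workqueue invokes frwr_op_recover_mr, which
 *	     destroys and re-allocates the underlying ib_mr
 *	     (__frwr_reset_mr)
 *	  -> the fresh MR is returned to rb_mws via rpcrdma_put_mw
 */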

#include <linux/sunrpc/rpc_rdma.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	unsigned int depth = ia->ri_max_frmr_depth;
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
	if (!r->mw_sg)
		goto out_list_err;

	sg_init_table(r->mw_sg, depth);
	init_completion(&f->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
frwr_op_release_mr(struct rpcrdma_mw *r)
{
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       r, rc);
	kfree(r->mw_sg);
	kfree(r);
}

static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	rc = ib_dereg_mr(f->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, r);
		return rc;
	}

	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
			       ia->ri_max_frmr_depth);
	if (IS_ERR(f->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(f->fr_mr), r);
		return PTR_ERR(f->fr_mr);
	}

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, f);
	f->fr_state = FRMR_IS_INVALID;
	return 0;
}

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
	enum rpcrdma_frmr_state state = mw->frmr.fr_state;
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_reset_mr(ia, mw);
	if (state != FRMR_FLUSHED_LI)
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	frwr_op_release_mr(mw);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      attrs->max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}
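
	/* Worked example (numbers are illustrative only): if
	 * RPCRDMA_MAX_DATA_SEGS were 256 and the device's max FRMR
	 * depth were 64, delta would start at 192 and the loop would
	 * run three times (192 -> 128 -> 64 -> 0), adding two WRs per
	 * pass: depth = 7 + 3 * 2 = 13 send queue entries per RPC.
	 */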

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
		cdata->max_requests = attrs->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frmr_depth);
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}
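
/* For illustration (constants vary by kernel configuration): with a
 * device FRMR depth of 128 and an RPCRDMA_MAX_HDR_SEGS of 8, the
 * header could convey up to 8 * 128 = 1024 pages, so the result
 * would be capped at RPCRDMA_MAX_DATA_SEGS.
 */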

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		frmr->fr_state = FRMR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		frmr->fr_state = FRMR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS) {
		frmr->fr_state = FRMR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frmr->fr_linv_done);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mw **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	mw = NULL;
	do {
		if (mw)
			rpcrdma_defer_mr_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOBUFS;
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);
	if (i == 0)
		goto out_dmamap_err;

	dma_nents = ib_dma_map_sg(ia->ri_device,
				  mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mw->mw_nents))
		goto out_mapmr_err;

	dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, frmr, mw->mw_nents, mr->length);

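	/* Bump the 8-bit key portion of the rkey before each
	 * registration, so that a remote peer holding a stale rkey
	 * from an earlier use of this MR cannot access the newly
	 * registered memory.
	 */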
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	mw->mw_handle = mr->rkey;
	mw->mw_length = mr->length;
	mw->mw_offset = mr->iova;

	*out = mw;
	return mw->mw_nents;

out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
	       frmr->fr_mr, n, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;

out_senderr:
	pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
	rpcrdma_defer_mr_recovery(mw);
	return -ENOTCONN;
}
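
/* Expected usage, sketched from the marshaling path (the caller
 * details below are an illustration, not code from this file):
 *
 *	struct rpcrdma_mw *mw;
 *	int n;
 *
 *	n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing, &mw);
 *	if (n < 0)
 *		return n;
 *	list_add(&mw->mw_list, &req->rl_registered);
 *	seg += n;
 *	nsegs -= n;
 */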

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that req->rl_registered is not empty.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last, *bad_wr;
	struct rpcrdma_rep *rep = req->rl_reply;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw, *tmp;
	struct rpcrdma_frmr *f;
	int count, rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	f = NULL;
	count = 0;
	prev = &first;
	list_for_each_entry(mw, &req->rl_registered, mw_list) {
		mw->frmr.fr_state = FRMR_IS_INVALID;

		if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) &&
		    (mw->mw_handle == rep->rr_inv_rkey))
			continue;

		f = &mw->frmr;
		dprintk("RPC: %s: invalidating frmr %p\n",
			__func__, f);

		f->fr_cqe.done = frwr_wc_localinv;
		last = &f->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &f->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mw->mw_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
	if (!f)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	f->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&f->fr_linv_done);

	/* Initialize CQ count, since there is always a signaled
	 * WR being posted here. The new cqcount depends on how
	 * many SQEs are about to be consumed.
	 */
	rpcrdma_init_cqcount(&r_xprt->rx_ep, count);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
	if (rc)
		goto reset_mrs;

	wait_for_completion(&f->fr_linv_done);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
unmap:
	list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
		dprintk("RPC: %s: DMA unmapping frmr %p\n",
			__func__, &mw->frmr);
		list_del_init(&mw->mw_list);
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);
	}
	return;

reset_mrs:
	pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
	rdma_disconnect(ia->ri_id);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted. This is synchronous, and slow.
	 */
	list_for_each_entry(mw, &req->rl_registered, mw_list) {
		f = &mw->frmr;
		if (mw->mw_handle == bad_wr->ex.invalidate_rkey) {
			__frwr_reset_mr(ia, mw);
			bad_wr = bad_wr->next;
		}
	}
	goto unmap;
}
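
/* Typical call site, for illustration (the actual caller is the RPC
 * reply handler, per the comment above):
 *
 *	if (!list_empty(&req->rl_registered))
 *		ia->ri_ops->ro_unmap_sync(r_xprt, req);
 */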

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		   bool sync)
{
	struct rpcrdma_mw *mw;

	while (!list_empty(&req->rl_registered)) {
		mw = list_first_entry(&req->rl_registered,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);

		if (sync)
			frwr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map			= frwr_op_map,
	.ro_unmap_sync		= frwr_op_unmap_sync,
	.ro_unmap_safe		= frwr_op_unmap_safe,
	.ro_recover_mr		= frwr_op_recover_mr,
	.ro_open		= frwr_op_open,
	.ro_maxpages		= frwr_op_maxpages,
	.ro_init_mr		= frwr_op_init_mr,
	.ro_release_mr		= frwr_op_release_mr,
	.ro_displayname		= "frwr",
	.ro_send_w_inv_ok	= RPCRDMA_CMP_F_SND_W_INV_OK,
};
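
/* Sketch of how this ops vector is selected during transport setup
 * (an approximation of the code in verbs.c; names are illustrative):
 *
 *	switch (memreg_strategy) {
 *	case RPCRDMA_FRMR:
 *		if (frwr_is_supported(ia)) {
 *			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
 *			break;
 *		}
 *		...
 *	}
 */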