/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Sometimes referred to as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)

/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};

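/* Allocate the DMA address array and scatterlist for one MW, then
 * allocate the underlying ib_fmr. The shared error path frees both
 * arrays; kfree(NULL) is a no-op, so it is safe whichever allocation
 * failed (the MW itself is zeroed by its caller).
 */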
static int
__fmr_init(struct rpcrdma_mw *mw, struct ib_pd *pd)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mw->fmr.fm_physaddrs)
		goto out_free;

	mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mw->mw_sg), GFP_KERNEL);
	if (!mw->mw_sg)
		goto out_free;

	sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

	mw->fmr.fm_mr = ib_alloc_fmr(pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mw->fmr.fm_mr))
		goto out_fmr_err;

	return 0;

out_fmr_err:
	dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mw->fmr.fm_mr));

out_free:
	kfree(mw->mw_sg);
	kfree(mw->fmr.fm_physaddrs);
	return -ENOMEM;
}

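/* Invalidate a single FMR. ib_unmap_fmr() operates on a list, so
 * the FMR is placed on a temporary on-stack list for the call.
 */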
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mw->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del_init(&mw->fmr.fm_mr->list);
	return rc;
}

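/* Release all resources held by one MW: the DMA address array,
 * the scatterlist, and the ib_fmr itself.
 */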
static void
__fmr_release(struct rpcrdma_mw *r)
{
	int rc;

	kfree(r->fmr.fm_physaddrs);
	kfree(r->mw_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(r);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       r, rc);

	rc = ib_dealloc_fmr(r->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       r, rc);
}

/* Reset of a single FMR.
 *
 * There's no recovery if this fails. The FMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	int rc;

	/* ORDER: invalidate first */
	rc = __fmr_unmap(mw);

	/* ORDER: then DMA unmap */
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc) {
		pr_err("rpcrdma: FMR reset status %d, %p orphaned\n",
		       rc, mw);
		r_xprt->rx_stats.mrs_orphaned++;
		return;
	}

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
}

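/* Size the RPC-over-RDMA headers for this connection: with FMR,
 * each chunk requires at most RPCRDMA_MAX_DATA_SEGS /
 * RPCRDMA_MAX_FMR_SGES segment registrations.
 */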
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      RPCRDMA_MAX_FMR_SGES));
	return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}

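/* Populate the buffer's MW freelist: one set of MWs per RPC slot,
 * enough to register each request's chunks plus its head and tail
 * iovecs.
 */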
static int
fmr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	struct rpcrdma_mw *r;
	int i, rc;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC: %s: initializing %d FMRs\n", __func__, i);

	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __fmr_init(r, pd);
		if (rc) {
			kfree(r);
			return rc;
		}

		r->mw_xprt = r_xprt;
		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}
	return 0;
}

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;
	u64 *dma_pages;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	if (mw)
		rpcrdma_defer_mr_recovery(mw);
	mw = rpcrdma_get_mw(r_xprt);
	if (!mw)
		return -ENOMEM;

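	/* Pull the offset into the first page out of the segment so
	 * that the address list handed to ib_map_phys_fmr() starts
	 * on a page boundary; the offset is added back into mr_base
	 * once the pages are mapped.
	 */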
	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes: stop if the segment just added
		 * does not end on a page boundary, or if the next
		 * segment does not begin on one.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);

	if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
			   mw->mw_sg, mw->mw_nents, mw->mw_dir))
		goto out_dmamap_err;

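	/* ib_map_phys_fmr() takes an array of DMA page addresses.
	 * The first address also serves as the requested I/O
	 * virtual address for the mapping.
	 */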
	for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
		dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
	rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mw->fmr.fm_mr->rkey;
	seg1->mr_base = dma_pages[0] + pageoff;
	seg1->mr_nsegs = mw->mw_nents;
	seg1->mr_len = len;
	return mw->mw_nents;

out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	return -ENOMEM;

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mw->mw_nents, rc);
	rpcrdma_defer_mr_recovery(mw);
	return rc;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_mw *mw;
	LIST_HEAD(unmap_list);
	int rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);

		i += seg->mr_nsegs;
	}
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_reset;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_del_init(&mw->fmr.fm_mr->list);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}

	req->rl_nchunks = 0;
	return;

out_reset:
	pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

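	/* The FMRs on unmap_list are now in an unknown state.
	 * Recover each one individually; any that cannot be
	 * recovered are orphaned by fmr_op_recover_mr().
	 */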
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_del_init(&mw->fmr.fm_mr->list);
		fmr_op_recover_mr(mw);

		i += seg->mr_nsegs;
	}
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		  bool sync)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int i;

	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		if (sync)
			fmr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}
}

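/* Tear down every MW on transport destruction, whether or not it
 * is on the free list; rb_all holds them all.
 */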
static void
fmr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__fmr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_unmap_safe			= fmr_op_unmap_safe,
	.ro_recover_mr			= fmr_op_recover_mr,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init			= fmr_op_init,
	.ro_destroy			= fmr_op_destroy,
	.ro_displayname			= "fmr",
};
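
/* This ops vector is wired up at transport setup time (in
 * rpcrdma_ia_open(), verbs.c) when the FMR registration strategy
 * (RPCRDMA_MTHCAFMR) is in use.
 */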