/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */
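
/* A minimal sketch of this verb sequence, for illustration only;
 * the names "fmr", "pages", "npages", and "fmr_list" are
 * hypothetical, and error handling is elided:
 *
 *      rc = ib_map_phys_fmr(fmr, pages, npages, pages[0]);
 *      ... post RDMA READ or WRITE using fmr->rkey ...
 *      list_add(&fmr->list, &fmr_list);
 *      rc = ib_unmap_fmr(&fmr_list);
 */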

/* Transport recovery
 *
 * After a transport reconnect, fmr_op_map re-uses the MR already
 * allocated for the RPC, but generates a fresh rkey then maps the
 * MR again. This process is synchronous.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES    (64)

/* Access mode of externally registered pages */
enum {
        RPCRDMA_FMR_ACCESS_FLAGS        = IB_ACCESS_REMOTE_WRITE |
                                          IB_ACCESS_REMOTE_READ,
};

static struct workqueue_struct *fmr_recovery_wq;

#define FMR_RECOVERY_WQ_FLAGS   (WQ_UNBOUND)

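/* Set up the transport-global workqueue on which broken FMRs are
 * reset. The work items sleep in ib_unmap_fmr, so an unbound
 * workqueue is used.
 */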
int
fmr_alloc_recovery_wq(void)
{
        fmr_recovery_wq = alloc_workqueue("fmr_recovery",
                                          FMR_RECOVERY_WQ_FLAGS, 0);
        return !fmr_recovery_wq ? -ENOMEM : 0;
}

void
fmr_destroy_recovery_wq(void)
{
        struct workqueue_struct *wq;

        if (!fmr_recovery_wq)
                return;

        wq = fmr_recovery_wq;
        fmr_recovery_wq = NULL;
        destroy_workqueue(wq);
}

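/* Allocate the resources for one FMR: the array of DMA addresses
 * handed to ib_map_phys_fmr, the scatterlist used for DMA mapping,
 * and the FMR itself. max_maps is 1 because each FMR is always
 * unmapped before being mapped again.
 */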
static int
__fmr_init(struct rpcrdma_mw *mw, struct ib_pd *pd)
{
        static struct ib_fmr_attr fmr_attr = {
                .max_pages      = RPCRDMA_MAX_FMR_SGES,
                .max_maps       = 1,
                .page_shift     = PAGE_SHIFT
        };

        mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
                                       sizeof(u64), GFP_KERNEL);
        if (!mw->fmr.fm_physaddrs)
                goto out_free;

        mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
                            sizeof(*mw->mw_sg), GFP_KERNEL);
        if (!mw->mw_sg)
                goto out_free;

        sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

        mw->fmr.fm_mr = ib_alloc_fmr(pd, RPCRDMA_FMR_ACCESS_FLAGS,
                                     &fmr_attr);
        if (IS_ERR(mw->fmr.fm_mr))
                goto out_fmr_err;

        return 0;

out_fmr_err:
        dprintk("RPC:       %s: ib_alloc_fmr returned %ld\n", __func__,
                PTR_ERR(mw->fmr.fm_mr));

out_free:
        kfree(mw->mw_sg);
        kfree(mw->fmr.fm_physaddrs);
        return -ENOMEM;
}

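/* Invalidate an FMR's rkey. ib_unmap_fmr operates on a list so
 * that several FMRs can share one slow verb call; here a single
 * MR is placed on a temporary list.
 */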
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
        LIST_HEAD(l);
        int rc;

        list_add(&mw->fmr.fm_mr->list, &l);
        rc = ib_unmap_fmr(&l);
        list_del_init(&mw->fmr.fm_mr->list);
        return rc;
}

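/* DMA unmap an MR's scatterlist and return the MR to the
 * transport's free list.
 */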
static void
__fmr_dma_unmap(struct rpcrdma_mw *mw)
{
        struct rpcrdma_xprt *r_xprt = mw->mw_xprt;

        ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
                        mw->mw_sg, mw->mw_nents, mw->mw_dir);
        rpcrdma_put_mw(r_xprt, mw);
}

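/* Invalidate an MR's rkey, then DMA unmap it and release it.
 * Must be called in a context that can sleep.
 */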
static void
__fmr_reset_and_unmap(struct rpcrdma_mw *mw)
{
        int rc;

        /* ORDER: invalidate first, so the device can no longer
         * access the pages, then DMA unmap them */
        rc = __fmr_unmap(mw);
        if (rc) {
                pr_warn("rpcrdma: ib_unmap_fmr status %d, fmr %p orphaned\n",
                        rc, mw);
                return;
        }
        __fmr_dma_unmap(mw);
}

static void
__fmr_release(struct rpcrdma_mw *r)
{
        int rc;

        kfree(r->fmr.fm_physaddrs);
        kfree(r->mw_sg);

        rc = ib_dealloc_fmr(r->fmr.fm_mr);
        if (rc)
                pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
                       r, rc);
}

/* Deferred reset of a single FMR: invalidate its rkey and DMA
 * unmap it, outside of atomic context. There's no recovery if
 * this fails.
 */
static void
__fmr_recovery_worker(struct work_struct *work)
{
        struct rpcrdma_mw *mw = container_of(work, struct rpcrdma_mw,
                                             mw_work);

        __fmr_reset_and_unmap(mw);
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
static void
__fmr_queue_recovery(struct rpcrdma_mw *mw)
{
        INIT_WORK(&mw->mw_work, __fmr_recovery_worker);
        queue_work(fmr_recovery_wq, &mw->mw_work);
}

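/* FMR requires no per-connection setup of its own; just size the
 * RPC-over-RDMA headers based on how many chunk list elements this
 * registration mode can produce.
 */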
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
            struct rpcrdma_create_data_internal *cdata)
{
        rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
                                                      RPCRDMA_MAX_DATA_SEGS /
                                                      RPCRDMA_MAX_FMR_SGES));
        return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}

static int
fmr_op_init(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
        struct rpcrdma_mw *r;
        int i, rc;

        spin_lock_init(&buf->rb_mwlock);
        INIT_LIST_HEAD(&buf->rb_mws);
        INIT_LIST_HEAD(&buf->rb_all);

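        /* Allocate enough MRs for the worst case: one MR per
         * RPCRDMA_MAX_FMR_SGES-page chunk of payload, plus one
         * each for the head and tail iovecs, for every RPC slot.
         */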
        i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
        i += 2;                         /* head + tail */
        i *= buf->rb_max_requests;      /* one set for each RPC slot */
        dprintk("RPC:       %s: initializing %d FMRs\n", __func__, i);

        while (i--) {
                r = kzalloc(sizeof(*r), GFP_KERNEL);
                if (!r)
                        return -ENOMEM;

                rc = __fmr_init(r, pd);
                if (rc) {
                        kfree(r);
                        return rc;
                }

                r->mw_xprt = r_xprt;
                list_add(&r->mw_list, &buf->rb_mws);
                list_add(&r->mw_all, &buf->rb_all);
        }
        return 0;
}

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
           int nsegs, bool writing)
{
        struct rpcrdma_mr_seg *seg1 = seg;
        int len, pageoff, i, rc;
        struct rpcrdma_mw *mw;
        u64 *dma_pages;

        mw = seg1->rl_mw;
        seg1->rl_mw = NULL;
        if (!mw) {
                mw = rpcrdma_get_mw(r_xprt);
                if (!mw)
                        return -ENOMEM;
        } else {
                /* this is a retransmit; generate a fresh rkey */
                rc = __fmr_unmap(mw);
                if (rc)
                        return rc;
        }

        pageoff = offset_in_page(seg1->mr_offset);
        seg1->mr_offset -= pageoff;     /* start of page */
        seg1->mr_len += pageoff;
        len = -pageoff;
        if (nsegs > RPCRDMA_MAX_FMR_SGES)
                nsegs = RPCRDMA_MAX_FMR_SGES;
        for (i = 0; i < nsegs;) {
                if (seg->mr_page)
                        sg_set_page(&mw->mw_sg[i],
                                    seg->mr_page,
                                    seg->mr_len,
                                    offset_in_page(seg->mr_offset));
                else
                        sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
                                   seg->mr_len);
                len += seg->mr_len;
                ++seg;
                ++i;
                /* Check for holes: an FMR maps one contiguous
                 * range of pages, so stop at the first gap */
                if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                        break;
        }
        mw->mw_nents = i;
        mw->mw_dir = rpcrdma_data_dir(writing);

        if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
                           mw->mw_sg, mw->mw_nents, mw->mw_dir))
                goto out_dmamap_err;

        for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
                dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
        rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
                             dma_pages[0]);
        if (rc)
                goto out_maperr;

        seg1->rl_mw = mw;
        seg1->mr_rkey = mw->fmr.fm_mr->rkey;
        seg1->mr_base = dma_pages[0] + pageoff;
        seg1->mr_nsegs = mw->mw_nents;
        seg1->mr_len = len;
        return mw->mw_nents;

out_dmamap_err:
        pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
               mw->mw_sg, mw->mw_nents);
        return -ENOMEM;

out_maperr:
        pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
               len, (unsigned long long)dma_pages[0],
               pageoff, mw->mw_nents, rc);
        __fmr_dma_unmap(mw);
        return rc;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct rpcrdma_mr_seg *seg;
        unsigned int i, nchunks;
        struct rpcrdma_mw *mw;
        LIST_HEAD(unmap_list);
        int rc;

        dprintk("RPC:       %s: req %p\n", __func__, req);

        /* ORDER: Invalidate all of the req's MRs first
         *
         * ib_unmap_fmr() is slow, so use a single call instead
         * of one call per mapped MR.
         */
        for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
                seg = &req->rl_segments[i];
                mw = seg->rl_mw;

                list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);

                i += seg->mr_nsegs;
        }
        rc = ib_unmap_fmr(&unmap_list);
        if (rc)
                pr_warn("%s: ib_unmap_fmr failed (%i)\n", __func__, rc);

        /* ORDER: Now DMA unmap all of the req's MRs, and return
         * them to the free MW list.
         */
        for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
                seg = &req->rl_segments[i];
                mw = seg->rl_mw;

                list_del_init(&mw->fmr.fm_mr->list);
                __fmr_dma_unmap(mw);

                i += seg->mr_nsegs;
                seg->mr_nsegs = 0;
                seg->rl_mw = NULL;
        }

        req->rl_nchunks = 0;
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                  bool sync)
{
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mw *mw;
        unsigned int i;

        for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
                seg = &req->rl_segments[i];
                mw = seg->rl_mw;

                if (sync)
                        __fmr_reset_and_unmap(mw);
                else
                        __fmr_queue_recovery(mw);

                i += seg->mr_nsegs;
                seg->mr_nsegs = 0;
                seg->rl_mw = NULL;
        }
}

static void
fmr_op_destroy(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_mw *r;

        while (!list_empty(&buf->rb_all)) {
                r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
                list_del(&r->mw_all);
                __fmr_release(r);
                kfree(r);
        }
}

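/* Registration strategy handlers, invoked through the transport's
 * memreg ops pointer. A minimal sketch of a call site (assuming
 * the usual rpcrdma_ia field naming, with ri_ops pointing at this
 * table):
 *
 *      n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 */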
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
        .ro_map                         = fmr_op_map,
        .ro_unmap_sync                  = fmr_op_unmap_sync,
        .ro_unmap_safe                  = fmr_op_unmap_safe,
        .ro_open                        = fmr_op_open,
        .ro_maxpages                    = fmr_op_maxpages,
        .ro_init                        = fmr_op_init,
        .ro_destroy                     = fmr_op_destroy,
        .ro_displayname                 = "fmr",
};