/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
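
/* Illustrative sketch (not part of the build): the MR lifecycle the
 * comment above describes, using the fr_state values that appear in
 * the code below. The server's RDMA step is outside this file and is
 * shown only for context.
 *
 *	frwr_op_map():   post FAST_REG WR; fr_state = FRMR_IS_VALID
 *	    <server performs RDMA READ or WRITE against the region>
 *	frwr_op_unmap(): fr_state = FRMR_IS_INVALID; post LOCAL_INV WR;
 *	                 MR returned to rb_mws without waiting for the
 *	                 completion upcall
 */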

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
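
/* Illustrative sketch (not part of the build): the recovery loop at
 * the top of frwr_op_map() below implements this policy. Any MR found
 * in a state other than FRMR_IS_INVALID is handed to the recovery
 * workqueue and replaced before the RPC proceeds:
 *
 *	do {
 *		if (mw)
 *			__frwr_queue_recovery(mw);
 *		mw = rpcrdma_get_mw(r_xprt);
 *	} while (mw->r.frmr.fr_state != FRMR_IS_INVALID);
 */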

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static struct workqueue_struct *frwr_recovery_wq;

#define FRWR_RECOVERY_WQ_FLAGS		(WQ_UNBOUND | WQ_MEM_RECLAIM)

int
frwr_alloc_recovery_wq(void)
{
	frwr_recovery_wq = alloc_workqueue("frwr_recovery",
					   FRWR_RECOVERY_WQ_FLAGS, 0);
	return !frwr_recovery_wq ? -ENOMEM : 0;
}

void
frwr_destroy_recovery_wq(void)
{
	struct workqueue_struct *wq;

	if (!frwr_recovery_wq)
		return;

	wq = frwr_recovery_wq;
	frwr_recovery_wq = NULL;
	destroy_workqueue(wq);
}

/* Deferred reset of a single FRMR. Generate a fresh rkey by
 * replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
__frwr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
					    r.frmr.fr_work);
	struct rpcrdma_xprt *r_xprt = r->r.frmr.fr_xprt;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;

	if (ib_dereg_mr(r->r.frmr.fr_mr))
		goto out_fail;

	r->r.frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(r->r.frmr.fr_mr))
		goto out_fail;

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
	r->r.frmr.fr_state = FRMR_IS_INVALID;
	rpcrdma_put_mw(r_xprt, r);
	return;

out_fail:
	pr_warn("RPC: %s: FRMR %p unrecovered\n",
		__func__, r);
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
static void
__frwr_queue_recovery(struct rpcrdma_mw *r)
{
	INIT_WORK(&r->r.frmr.fr_work, __frwr_recovery_worker);
	queue_work(frwr_recovery_wq, &r->r.frmr.fr_work);
}

static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
	    unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->r.frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	f->sg = kcalloc(depth, sizeof(*f->sg), GFP_KERNEL);
	if (!f->sg)
		goto out_list_err;

	sg_init_table(f->sg, depth);

	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->r.frmr.fr_mr);
	if (rc)
		dprintk("RPC: %s: ib_dereg_mr status %i\n",
			__func__, rc);
	kfree(r->r.frmr.sg);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      devattr->max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}
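
	/* Worked example with hypothetical values (not taken from this
	 * file): if RPCRDMA_MAX_DATA_SEGS were 64 and the device
	 * reported a max FR page list length of 16, delta would start
	 * at 48 and the loop above would run three times
	 * (delta: 48 -> 32 -> 16 -> 0), so depth = 7 + 3 * 2 = 13.
	 */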

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
		cdata->max_requests = devattr->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
}
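
/* For example (hypothetical values): with rpcrdma_max_segments()
 * returning 8 and ri_max_frmr_depth at 128, the product 1024 is
 * clamped to RPCRDMA_MAX_DATA_SEGS, so the transport never claims
 * to handle more pages than a single RPC can actually carry.
 */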

/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs to be reset. */
static void
frwr_sendcompletion(struct ib_wc *wc)
{
	struct rpcrdma_mw *r;

	if (likely(wc->status == IB_WC_SUCCESS))
		return;

	/* WARNING: Only wr_id and status are reliable at this point */
	r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		dprintk("RPC: %s: frmr %p flushed\n", __func__, r);
	else
		pr_warn("RPC: %s: frmr %p error, status %s (%d)\n",
			__func__, r, ib_wc_status_msg(wc->status), wc->status);
	r->r.frmr.fr_state = FRMR_IS_STALE;
}

static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);
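
	/* Worked example with hypothetical values: with a 64-segment
	 * maximum and a device depth of 16, each RPC slot needs
	 * 64 / 16 = 4 MRs for the pagelist plus one each for head and
	 * tail; with rb_max_requests at 32, the loop below creates
	 * (4 + 2) * 32 = 192 FRMRs.
	 */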

	while (i--) {
		struct rpcrdma_mw *r;
		int rc;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __frwr_init(r, pd, device, depth);
		if (rc) {
			kfree(r);
			return rc;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
		r->mw_sendcompletion = frwr_sendcompletion;
		r->r.frmr.fr_xprt = r_xprt;
	}

	return 0;
}

/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	do {
		if (mw)
			__frwr_queue_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->r.frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->r.frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;

	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&frmr->sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&frmr->sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;

		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
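
	/* A "hole" is a segment boundary that does not fall on a page
	 * boundary: either the next segment starts mid-page or this
	 * one ends mid-page. Pages after a hole cannot be mapped as
	 * one contiguous region, so the gather loop stops there and
	 * the caller registers the remaining segments separately.
	 */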
	frmr->sg_nents = i;

	dma_nents = ib_dma_map_sg(device, frmr->sg, frmr->sg_nents, direction);
	if (!dma_nents) {
		pr_err("RPC: %s: failed to dma map sg %p sg_nents %u\n",
		       __func__, frmr->sg, frmr->sg_nents);
		return -ENOMEM;
	}

	n = ib_map_mr_sg(mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
	if (unlikely(n != frmr->sg_nents)) {
		pr_err("RPC: %s: failed to map mr %p (%u/%u)\n",
		       __func__, frmr->fr_mr, n, frmr->sg_nents);
		rc = n < 0 ? n : -EINVAL;
		goto out_senderr;
	}

	dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, mw, frmr->sg_nents, mr->length);

	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);
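
	/* ib_update_fast_reg_key() replaces the low-order byte of the
	 * MR's rkey and lkey. Incrementing it before each registration
	 * yields a fresh rkey, so a peer holding an rkey advertised
	 * for an earlier registration of this MR cannot use it to
	 * reach the newly registered memory.
	 */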

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	reg_wr->wr.wr_id = (uintptr_t)mw;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->mr_dir = direction;
	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = mr->iova;
	seg1->mr_nsegs = frmr->sg_nents;
	seg1->mr_len = mr->length;

	return frmr->sg_nents;

out_senderr:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	ib_dma_unmap_sg(device, frmr->sg, dma_nents, direction);
	__frwr_queue_recovery(mw);
	return rc;
}

/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct rpcrdma_frmr *frmr = &mw->r.frmr;
	struct ib_send_wr *invalidate_wr, *bad_wr;
	int rc, nsegs = seg->mr_nsegs;

	dprintk("RPC: %s: FRMR %p\n", __func__, mw);

	seg1->rl_mw = NULL;
	frmr->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &mw->r.frmr.fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	invalidate_wr->wr_id = (uintptr_t)mw;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = frmr->fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	ib_dma_unmap_sg(ia->ri_device, frmr->sg, frmr->sg_nents, seg1->mr_dir);
	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc)
		goto out_err;

	rpcrdma_put_mw(r_xprt, mw);
	return nsegs;

out_err:
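	/* Even though the LOCAL_INV could not be posted, report the
	 * segments as unmapped: the MW is queued for recovery, which
	 * deregisters and replaces the underlying MR, so the caller
	 * must not reuse this mapping.
	 */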
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	__frwr_queue_recovery(mw);
	return nsegs;
}

static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	/* Ensure stale MWs for "buf" are no longer in flight */
	flush_workqueue(frwr_recovery_wq);

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_unmap			= frwr_op_unmap,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init			= frwr_op_init,
	.ro_destroy			= frwr_op_destroy,
	.ro_displayname			= "frwr",
};
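
/* These methods are reached through the ops vector rather than called
 * directly. A minimal sketch of a call site (assuming the rpcrdma_ia
 * exposes the vector as ri_ops, as other xprtrdma code does):
 *
 *	n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *
 * Keeping registration behind this vector lets the transport switch
 * among frwr, fmr, and physical registration modes without changing
 * the generic RPC/RDMA paths.
 */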