/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also referred to sometimes as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID: The MR was not in use before the QP entered ERROR state.
 *          (Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:   The MR was being registered or unregistered when the QP
 *          entered ERROR state, and the pending WR was flushed.
 *
 * VALID:   The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */

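/* MR state transitions, summarized from the two comments above
 * (a reading aid only; the authoritative logic is in the code below):
 *
 *	FRMR_IS_INVALID --(frwr_op_map posts FAST_REG)--> FRMR_IS_VALID
 *	FRMR_IS_VALID --(frwr_op_unmap* posts LOCAL_INV)--> FRMR_IS_INVALID
 *	any state --(WR flushed after a QP error)--> FRMR_IS_STALE
 *	FRMR_IS_STALE or _VALID --(__frwr_recovery_worker)--> FRMR_IS_INVALID
 */
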
#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static struct workqueue_struct *frwr_recovery_wq;

#define FRWR_RECOVERY_WQ_FLAGS		(WQ_UNBOUND | WQ_MEM_RECLAIM)

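/* Set up the global workqueue on which broken FRMRs are reset
 * outside of the send path. Returns -ENOMEM if allocation fails.
 */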
int
frwr_alloc_recovery_wq(void)
{
	frwr_recovery_wq = alloc_workqueue("frwr_recovery",
					   FRWR_RECOVERY_WQ_FLAGS, 0);
	return !frwr_recovery_wq ? -ENOMEM : 0;
}

void
frwr_destroy_recovery_wq(void)
{
	struct workqueue_struct *wq;

	if (!frwr_recovery_wq)
		return;

	wq = frwr_recovery_wq;
	frwr_recovery_wq = NULL;
	destroy_workqueue(wq);
}

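/* Reset a broken FRMR: destroy the old ib_mr and allocate a
 * replacement, which carries a fresh rkey. On success the MR is
 * back in the INVALID state and is safe to reuse.
 */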
static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	rc = ib_dereg_mr(f->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, r);
		return rc;
	}

	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
			       ia->ri_max_frmr_depth);
	if (IS_ERR(f->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(f->fr_mr), r);
		return PTR_ERR(f->fr_mr);
	}

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
	f->fr_state = FRMR_IS_INVALID;
	return 0;
}

/* Deferred reset of a single FRMR. Generate a fresh rkey by
 * replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
__frwr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
					    frmr.fr_work);
	struct rpcrdma_xprt *r_xprt = r->frmr.fr_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_reset_mr(ia, r);
	if (rc)
		return;

	rpcrdma_put_mw(r_xprt, r);
	return;
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
static void
__frwr_queue_recovery(struct rpcrdma_mw *r)
{
	INIT_WORK(&r->frmr.fr_work, __frwr_recovery_worker);
	queue_work(frwr_recovery_wq, &r->frmr.fr_work);
}

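/* Allocate the ib_mr, scatterlist, and LOCAL_INV completion that
 * back one rpcrdma_mw.
 */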
static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
	    unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	f->fr_sg = kcalloc(depth, sizeof(*f->fr_sg), GFP_KERNEL);
	if (!f->fr_sg)
		goto out_list_err;

	sg_init_table(f->fr_sg, depth);

	init_completion(&f->fr_linv_done);

	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

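/* Release the ib_mr and scatterlist owned by one rpcrdma_mw. */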
static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		dprintk("RPC: %s: ib_dereg_mr status %i\n",
			__func__, rc);
	kfree(r->frmr.fr_sg);
}

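/* Size the send queue so that each RPC slot can carry a full set of
 * FRMR register/invalidate WRs plus its SEND. For example (numbers
 * are illustrative only): if the device caps FR depth at 16 pages
 * while RPCRDMA_MAX_DATA_SEGS is 64, the pagelist needs three extra
 * reg/invalidate pairs, so depth grows from 7 to 13 WRs per RPC.
 */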
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      ia->ri_device->attrs.max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
		cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      ia->ri_max_frmr_depth));
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}

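/* Handle a flushed or failed send completion: mark the MR STALE so
 * that frwr_op_map will recover it, and log anything other than an
 * ordinary flush.
 */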
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
			    const char *wr)
{
	frmr->fr_state = FRMR_IS_STALE;
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "fastreg");
	}
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	}
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS)
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	complete_all(&frmr->fr_linv_done);
}

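/* Allocate enough MWs to register the head, pagelist, and tail of
 * every RPC slot, adding each to rb_mws (the free list) and rb_all
 * (the list of all MWs owned by this transport).
 */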
static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);

	while (i--) {
		struct rpcrdma_mw *r;
		int rc;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __frwr_init(r, pd, device, depth);
		if (rc) {
			kfree(r);
			return rc;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
		r->frmr.fr_xprt = r_xprt;
	}

	return 0;
}

/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	do {
		if (mw)
			__frwr_queue_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;

	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&frmr->fr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&frmr->fr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;

		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	frmr->fr_nents = i;
	frmr->fr_dir = direction;

	dma_nents = ib_dma_map_sg(device, frmr->fr_sg, frmr->fr_nents, direction);
	if (!dma_nents) {
		pr_err("RPC: %s: failed to dma map sg %p sg_nents %u\n",
		       __func__, frmr->fr_sg, frmr->fr_nents);
		return -ENOMEM;
	}

	n = ib_map_mr_sg(mr, frmr->fr_sg, frmr->fr_nents, PAGE_SIZE);
	if (unlikely(n != frmr->fr_nents)) {
		pr_err("RPC: %s: failed to map mr %p (%u/%u)\n",
		       __func__, frmr->fr_mr, n, frmr->fr_nents);
		rc = n < 0 ? n : -EINVAL;
		goto out_senderr;
	}

	dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, mw, frmr->fr_nents, mr->length);

	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = mr->iova;
	seg1->mr_nsegs = frmr->fr_nents;
	seg1->mr_len = mr->length;

	return frmr->fr_nents;

out_senderr:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	ib_dma_unmap_sg(device, frmr->fr_sg, dma_nents, direction);
	__frwr_queue_recovery(mw);
	return rc;
}

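/* Build, but do not post, a LOCAL_INV Work Request for one
 * registered segment. The caller chains these and posts the whole
 * chain with a single ib_post_send().
 */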
static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mw *mw = seg->rl_mw;
	struct rpcrdma_frmr *f = &mw->frmr;
	struct ib_send_wr *invalidate_wr;

	f->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &f->fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	f->fr_cqe.done = frwr_wc_localinv;
	invalidate_wr->wr_cqe = &f->fr_cqe;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

	return invalidate_wr;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *mw;
	int rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	invalidate_wrs = pos = prev = NULL;
	seg = NULL;
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		pos = __frwr_prepare_linv_wr(seg);

		if (!invalidate_wrs)
			invalidate_wrs = pos;
		else
			prev->next = pos;
		prev = pos;

		i += seg->mr_nsegs;
	}
	f = &seg->rl_mw->frmr;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	f->fr_invwr.send_flags = IB_SEND_SIGNALED;
	f->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&f->fr_linv_done);
	INIT_CQCOUNT(&r_xprt->rx_ep);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
	if (rc)
		goto reset_mrs;

	wait_for_completion(&f->fr_linv_done);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
unmap:
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;
		seg->rl_mw = NULL;

		/* Unmap each MW's own scatterlist, not the one left
		 * in "f" by the invalidation step above.
		 */
		f = &mw->frmr;
		ib_dma_unmap_sg(ia->ri_device, f->fr_sg, f->fr_nents,
				f->fr_dir);
		rpcrdma_put_mw(r_xprt, mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
	}

	req->rl_nchunks = 0;
	return;

reset_mrs:
	pr_warn("%s: ib_post_send failed %i\n", __func__, rc);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted. This is synchronous, and slow.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;
		f = &mw->frmr;

		if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
			__frwr_reset_mr(ia, mw);
			bad_wr = bad_wr->next;
		}

		i += seg->mr_nsegs;
	}
	goto unmap;
}

/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct rpcrdma_frmr *frmr = &mw->frmr;
	struct ib_send_wr *invalidate_wr, *bad_wr;
	int rc, nsegs = seg->mr_nsegs;

	dprintk("RPC: %s: FRMR %p\n", __func__, mw);

	seg1->rl_mw = NULL;
	frmr->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &mw->frmr.fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	frmr->fr_cqe.done = frwr_wc_localinv;
	invalidate_wr->wr_cqe = &frmr->fr_cqe;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = frmr->fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	ib_dma_unmap_sg(ia->ri_device, frmr->fr_sg, frmr->fr_nents, frmr->fr_dir);
	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc)
		goto out_err;

	rpcrdma_put_mw(r_xprt, mw);
	return nsegs;

out_err:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	__frwr_queue_recovery(mw);
	return nsegs;
}

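/* Free every MW owned by this transport, once the recovery
 * workqueue can no longer touch them.
 */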
static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	/* Ensure stale MWs for "buf" are no longer in flight */
	flush_workqueue(frwr_recovery_wq);

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map			= frwr_op_map,
	.ro_unmap_sync		= frwr_op_unmap_sync,
	.ro_unmap		= frwr_op_unmap,
	.ro_open		= frwr_op_open,
	.ro_maxpages		= frwr_op_maxpages,
	.ro_init		= frwr_op_init,
	.ro_destroy		= frwr_op_destroy,
	.ro_displayname		= "frwr",
};
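
/* A minimal sketch of how the generic RPC/RDMA code is expected to
 * invoke this ops vector (the real call sites live in rpc_rdma.c and
 * verbs.c; the lines below are illustrative, not verbatim):
 *
 *	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 *
 *	n = ia->ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *	...				// server performs RDMA READ/WRITE
 *	ia->ri_ops->ro_unmap_sync(r_xprt, req);
 */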