/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also referred to sometimes as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
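
/* Illustrative sketch only, not part of the transport logic: the two
 * Work Requests described above, expressed with the core verbs
 * structures. Here "mr", "qp", and "bad_wr" are stand-ins for an
 * allocated struct ib_mr, the connection's queue pair, and the usual
 * ib_post_send() out-param; the access flags shown assume a write
 * registration.
 *
 *	struct ib_reg_wr reg_wr = {
 *		.wr.opcode = IB_WR_REG_MR,
 *		.mr        = mr,
 *		.key       = mr->rkey,
 *		.access    = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE,
 *	};
 *	struct ib_send_wr inv_wr = {
 *		.opcode             = IB_WR_LOCAL_INV,
 *		.ex.invalidate_rkey = mr->rkey,
 *	};
 *
 *	ib_post_send(qp, &reg_wr.wr, &bad_wr);	<- FAST_REG: register
 *	   ... peer performs RDMA READ or WRITE ...
 *	ib_post_send(qp, &inv_wr, &bad_wr);	<- LOCAL_INV: fence
 */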

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
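
/* Illustrative sketch only: how the three states above might be
 * dispatched on when an MR is taken off rb_mws. The real flow lives
 * in frwr_op_map and frwr_op_recover_mr below; the fr_state values
 * come from xprt_rdma.h.
 *
 *	switch (mw->frmr.fr_state) {
 *	case FRMR_IS_INVALID:
 *		break;			<- ready: use this MR
 *	case FRMR_IS_STALE:
 *	case FRMR_IS_VALID:
 *		rpcrdma_defer_mr_recovery(mw);	<- reset off the hot
 *		continue;			   path, take another MR
 *	}
 */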

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
        struct ib_device_attr *attrs = &ia->ri_device->attrs;

        if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                goto out_not_supported;
        if (attrs->max_fast_reg_page_list_len == 0)
                goto out_not_supported;
        return true;

out_not_supported:
        pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
                ia->ri_device->name);
        return false;
}

static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
        unsigned int depth = ia->ri_max_frmr_depth;
        struct rpcrdma_frmr *f = &r->frmr;
        int rc;

        f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG, depth);
        if (IS_ERR(f->fr_mr))
                goto out_mr_err;

        r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
        if (!r->mw_sg)
                goto out_list_err;

        sg_init_table(r->mw_sg, depth);
        init_completion(&f->fr_linv_done);
        return 0;

out_mr_err:
        rc = PTR_ERR(f->fr_mr);
        dprintk("RPC: %s: ib_alloc_mr status %i\n",
                __func__, rc);
        return rc;

out_list_err:
        rc = -ENOMEM;
        dprintk("RPC: %s: sg allocation failure\n",
                __func__);
        ib_dereg_mr(f->fr_mr);
        return rc;
}

static void
frwr_op_release_mr(struct rpcrdma_mw *r)
{
        int rc;

        rc = ib_dereg_mr(r->frmr.fr_mr);
        if (rc)
                pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
                       r, rc);
        kfree(r->mw_sg);
        kfree(r);
}

static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
        struct rpcrdma_frmr *f = &r->frmr;
        int rc;

        rc = ib_dereg_mr(f->fr_mr);
        if (rc) {
                pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
                        rc, r);
                return rc;
        }

        f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
                               ia->ri_max_frmr_depth);
        if (IS_ERR(f->fr_mr)) {
                pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
                        PTR_ERR(f->fr_mr), r);
                return PTR_ERR(f->fr_mr);
        }

        dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
        f->fr_state = FRMR_IS_INVALID;
        return 0;
}

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
        struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        int rc;

        rc = __frwr_reset_mr(ia, mw);
        ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
        if (rc)
                goto out_release;

        rpcrdma_put_mw(r_xprt, mw);
        r_xprt->rx_stats.mrs_recovered++;
        return;

out_release:
        pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
        r_xprt->rx_stats.mrs_orphaned++;

        spin_lock(&r_xprt->rx_buf.rb_mwlock);
        list_del(&mw->mw_all);
        spin_unlock(&r_xprt->rx_buf.rb_mwlock);

        frwr_op_release_mr(mw);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
             struct rpcrdma_create_data_internal *cdata)
{
        int depth, delta;

        ia->ri_max_frmr_depth =
                        min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                              ia->ri_device->attrs.max_fast_reg_page_list_len);
        dprintk("RPC: %s: device's max FR page list len = %u\n",
                __func__, ia->ri_max_frmr_depth);

        /* Add room for frmr register and invalidate WRs.
         * 1. FRMR reg WR for head
         * 2. FRMR invalidate WR for head
         * 3. N FRMR reg WRs for pagelist
         * 4. N FRMR invalidate WRs for pagelist
         * 5. FRMR reg WR for tail
         * 6. FRMR invalidate WR for tail
         * 7. The RDMA_SEND WR
         */
        depth = 7;

        /* Calculate N if the device max FRMR depth is smaller than
         * RPCRDMA_MAX_DATA_SEGS.
         */
        if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
                delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
                do {
                        depth += 2; /* FRMR reg + invalidate */
                        delta -= ia->ri_max_frmr_depth;
                } while (delta > 0);
        }
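
        /* Worked example with hypothetical numbers: if the device caps
         * ri_max_frmr_depth at 64 and RPCRDMA_MAX_DATA_SEGS is 256,
         * delta starts at 192 and the loop above runs three times, so
         * each RPC reserves depth = 7 + 3 * 2 = 13 send queue WRs.
         */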

        ep->rep_attr.cap.max_send_wr *= depth;
        if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
                cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
                if (!cdata->max_requests)
                        return -EINVAL;
                ep->rep_attr.cap.max_send_wr = cdata->max_requests *
                                               depth;
        }

        rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
                                                      RPCRDMA_MAX_DATA_SEGS /
                                                      ia->ri_max_frmr_depth));
        return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
                            const char *wr)
{
        frmr->fr_state = FRMR_IS_STALE;
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
                       wr, ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
        struct rpcrdma_frmr *frmr;
        struct ib_cqe *cqe;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                cqe = wc->wr_cqe;
                frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
                __frwr_sendcompletion_flush(wc, frmr, "fastreg");
        }
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
        struct rpcrdma_frmr *frmr;
        struct ib_cqe *cqe;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                cqe = wc->wr_cqe;
                frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
                __frwr_sendcompletion_flush(wc, frmr, "localinv");
        }
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
        struct rpcrdma_frmr *frmr;
        struct ib_cqe *cqe;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        cqe = wc->wr_cqe;
        frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
        if (wc->status != IB_WC_SUCCESS)
                __frwr_sendcompletion_flush(wc, frmr, "localinv");
        complete_all(&frmr->fr_linv_done);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
            int nsegs, bool writing)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_mr_seg *seg1 = seg;
        struct rpcrdma_mw *mw;
        struct rpcrdma_frmr *frmr;
        struct ib_mr *mr;
        struct ib_reg_wr *reg_wr;
        struct ib_send_wr *bad_wr;
        int rc, i, n, dma_nents;
        u8 key;

        mw = seg1->rl_mw;
        seg1->rl_mw = NULL;
        do {
                if (mw)
                        rpcrdma_defer_mr_recovery(mw);
                mw = rpcrdma_get_mw(r_xprt);
                if (!mw)
                        return -ENOBUFS;
        } while (mw->frmr.fr_state != FRMR_IS_INVALID);
        frmr = &mw->frmr;
        frmr->fr_state = FRMR_IS_VALID;
        mr = frmr->fr_mr;
        reg_wr = &frmr->fr_regwr;

        if (nsegs > ia->ri_max_frmr_depth)
                nsegs = ia->ri_max_frmr_depth;
        for (i = 0; i < nsegs;) {
                if (seg->mr_page)
                        sg_set_page(&mw->mw_sg[i],
                                    seg->mr_page,
                                    seg->mr_len,
                                    offset_in_page(seg->mr_offset));
                else
                        sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
                                   seg->mr_len);

                ++seg;
                ++i;

                /* Check for holes */
                if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                        break;
        }
        mw->mw_nents = i;
        mw->mw_dir = rpcrdma_data_dir(writing);
        if (i == 0)
                goto out_dmamap_err;

        dma_nents = ib_dma_map_sg(ia->ri_device,
                                  mw->mw_sg, mw->mw_nents, mw->mw_dir);
        if (!dma_nents)
                goto out_dmamap_err;

        n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
        if (unlikely(n != mw->mw_nents))
                goto out_mapmr_err;

        dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
                __func__, mw, mw->mw_nents, mr->length);

        /* Bump the 8-bit key portion of the rkey so that a stale rkey
         * left over from a previous registration of this MR cannot be
         * used by the peer.
         */
        key = (u8)(mr->rkey & 0x000000FF);
        ib_update_fast_reg_key(mr, ++key);

        reg_wr->wr.next = NULL;
        reg_wr->wr.opcode = IB_WR_REG_MR;
        frmr->fr_cqe.done = frwr_wc_fastreg;
        reg_wr->wr.wr_cqe = &frmr->fr_cqe;
        reg_wr->wr.num_sge = 0;
        reg_wr->wr.send_flags = 0;
        reg_wr->mr = mr;
        reg_wr->key = mr->rkey;
        reg_wr->access = writing ?
                         IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                         IB_ACCESS_REMOTE_READ;

        DECR_CQCOUNT(&r_xprt->rx_ep);
        rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
        if (rc)
                goto out_senderr;

        seg1->rl_mw = mw;
        seg1->mr_rkey = mr->rkey;
        seg1->mr_base = mr->iova;
        seg1->mr_nsegs = mw->mw_nents;
        seg1->mr_len = mr->length;

        return mw->mw_nents;

out_dmamap_err:
        pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
               mw->mw_sg, mw->mw_nents);
        rpcrdma_defer_mr_recovery(mw);
        return -EIO;

out_mapmr_err:
        pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
               frmr->fr_mr, n, mw->mw_nents);
        rpcrdma_defer_mr_recovery(mw);
        return -EIO;

out_senderr:
        pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
        rpcrdma_defer_mr_recovery(mw);
        return -ENOTCONN;
}

static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
{
        struct rpcrdma_mw *mw = seg->rl_mw;
        struct rpcrdma_frmr *f = &mw->frmr;
        struct ib_send_wr *invalidate_wr;

        f->fr_state = FRMR_IS_INVALID;
        invalidate_wr = &f->fr_invwr;

        memset(invalidate_wr, 0, sizeof(*invalidate_wr));
        f->fr_cqe.done = frwr_wc_localinv;
        invalidate_wr->wr_cqe = &f->fr_cqe;
        invalidate_wr->opcode = IB_WR_LOCAL_INV;
        invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

        return invalidate_wr;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_mr_seg *seg;
        unsigned int i, nchunks;
        struct rpcrdma_frmr *f;
        struct rpcrdma_mw *mw;
        int rc;

        dprintk("RPC: %s: req %p\n", __func__, req);

        /* ORDER: Invalidate all of the req's MRs first
         *
         * Chain the LOCAL_INV Work Requests and post them with
         * a single ib_post_send() call.
         */
        invalidate_wrs = pos = prev = NULL;
        seg = NULL;
        for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
                seg = &req->rl_segments[i];

                pos = __frwr_prepare_linv_wr(seg);

                if (!invalidate_wrs)
                        invalidate_wrs = pos;
                else
                        prev->next = pos;
                prev = pos;

                i += seg->mr_nsegs;
        }
        f = &seg->rl_mw->frmr;

        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain
         * are complete.
         */
        f->fr_invwr.send_flags = IB_SEND_SIGNALED;
        f->fr_cqe.done = frwr_wc_localinv_wake;
        reinit_completion(&f->fr_linv_done);
        INIT_CQCOUNT(&r_xprt->rx_ep);

        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless ri_id->qp is a valid pointer.
         */
        rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
        if (rc)
                goto reset_mrs;

        wait_for_completion(&f->fr_linv_done);

        /* ORDER: Now DMA unmap all of the req's MRs, and return
         * them to the free MW list.
         */
unmap:
        for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
                seg = &req->rl_segments[i];
                mw = seg->rl_mw;
                seg->rl_mw = NULL;

                ib_dma_unmap_sg(ia->ri_device,
                                mw->mw_sg, mw->mw_nents, mw->mw_dir);
                rpcrdma_put_mw(r_xprt, mw);

                i += seg->mr_nsegs;
                seg->mr_nsegs = 0;
        }

        req->rl_nchunks = 0;
        return;

reset_mrs:
        pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
        rdma_disconnect(ia->ri_id);

        /* Find and reset the MRs in the LOCAL_INV WRs that did not
         * get posted. This is synchronous, and slow.
         */
        for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
                seg = &req->rl_segments[i];
                mw = seg->rl_mw;
                f = &mw->frmr;

                if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
                        __frwr_reset_mr(ia, mw);
                        bad_wr = bad_wr->next;
                }

                i += seg->mr_nsegs;
        }
        goto unmap;
}
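
/* Illustrative sketch only: the chaining pattern frwr_op_unmap_sync
 * relies on. LOCAL_INV WRs are linked through wr->next, only the last
 * WR is signaled, and send queue ordering guarantees that its
 * completion fences the whole chain ("first", "last", "f", and "qp"
 * are stand-ins):
 *
 *	first->next = last;
 *	last->send_flags = IB_SEND_SIGNALED;
 *	if (!ib_post_send(qp, first, &bad_wr))
 *		wait_for_completion(&f->fr_linv_done);
 */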

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                   bool sync)
{
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mw *mw;
        unsigned int i;

        for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
                seg = &req->rl_segments[i];
                mw = seg->rl_mw;

                if (sync)
                        frwr_op_recover_mr(mw);
                else
                        rpcrdma_defer_mr_recovery(mw);

                i += seg->mr_nsegs;
                seg->mr_nsegs = 0;
                seg->rl_mw = NULL;
        }
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
        .ro_map                         = frwr_op_map,
        .ro_unmap_sync                  = frwr_op_unmap_sync,
        .ro_unmap_safe                  = frwr_op_unmap_safe,
        .ro_recover_mr                  = frwr_op_recover_mr,
        .ro_open                        = frwr_op_open,
        .ro_maxpages                    = frwr_op_maxpages,
        .ro_init_mr                     = frwr_op_init_mr,
        .ro_release_mr                  = frwr_op_release_mr,
        .ro_displayname                 = "frwr",
};