/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap_sync).
 */
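
/* A minimal sketch of that verb pairing (illustration only, not code
 * from this file; "pd", "physaddrs", "npages", and "fmr_list" are
 * placeholders):
 *
 *	fmr = ib_alloc_fmr(pd, RPCRDMA_FMR_ACCESS_FLAGS, &fmr_attr);
 *	rc = ib_map_phys_fmr(fmr, physaddrs, npages, physaddrs[0]);
 *	... post RDMA READ or WRITE referencing fmr->rkey ...
 *	list_add(&fmr->list, &fmr_list);
 *	rc = ib_unmap_fmr(&fmr_list);
 */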

/* Transport recovery
 *
 * After a transport reconnect, fmr_op_map re-uses the MR already
 * allocated for the RPC, but generates a fresh rkey then maps the
 * MR again. This process is synchronous.
 */
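
/* A sketch of that recovery sequence (see fmr_op_map's retransmit
 * branch below; error handling omitted):
 *
 *	rc = __fmr_unmap(mw);			retires the old rkey
 *	rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
 *			     dma_pages[0]);	assigns a fresh rkey
 */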

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)

/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};

static struct workqueue_struct *fmr_recovery_wq;

#define FMR_RECOVERY_WQ_FLAGS		(WQ_UNBOUND)

int
fmr_alloc_recovery_wq(void)
{
	fmr_recovery_wq = alloc_workqueue("fmr_recovery",
					  FMR_RECOVERY_WQ_FLAGS, 0);
	return !fmr_recovery_wq ? -ENOMEM : 0;
}

void
fmr_destroy_recovery_wq(void)
{
	struct workqueue_struct *wq;

	if (!fmr_recovery_wq)
		return;

	wq = fmr_recovery_wq;
	fmr_recovery_wq = NULL;
	destroy_workqueue(wq);
}

static int
__fmr_init(struct rpcrdma_mw *mw, struct ib_pd *pd)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mw->fmr.fm_physaddrs)
		goto out_free;

	mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mw->mw_sg), GFP_KERNEL);
	if (!mw->mw_sg)
		goto out_free;

	sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

	mw->fmr.fm_mr = ib_alloc_fmr(pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mw->fmr.fm_mr))
		goto out_fmr_err;

	return 0;

out_fmr_err:
	dprintk("RPC:       %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mw->fmr.fm_mr));

out_free:
	kfree(mw->mw_sg);
	kfree(mw->fmr.fm_physaddrs);
	return -ENOMEM;
}

static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mw->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del_init(&mw->fmr.fm_mr->list);
	return rc;
}
117
Chuck Leverd48b1d22016-06-29 13:52:29 -0400118static void
Chuck Leverfcdfb962016-06-29 13:52:45 -0400119__fmr_dma_unmap(struct rpcrdma_mw *mw)
Chuck Leverd48b1d22016-06-29 13:52:29 -0400120{
Chuck Leverfcdfb962016-06-29 13:52:45 -0400121 struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
Chuck Leverd48b1d22016-06-29 13:52:29 -0400122
Chuck Leverfcdfb962016-06-29 13:52:45 -0400123 ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
124 mw->mw_sg, mw->mw_nents, mw->mw_dir);
125 rpcrdma_put_mw(r_xprt, mw);
126}
127
128static void
129__fmr_reset_and_unmap(struct rpcrdma_mw *mw)
130{
131 int rc;
132
133 /* ORDER */
134 rc = __fmr_unmap(mw);
135 if (rc) {
136 pr_warn("rpcrdma: ib_unmap_fmr status %d, fmr %p orphaned\n",
137 rc, mw);
138 return;
139 }
140 __fmr_dma_unmap(mw);
Chuck Leverd48b1d22016-06-29 13:52:29 -0400141}

static void
__fmr_release(struct rpcrdma_mw *r)
{
	int rc;

	kfree(r->fmr.fm_physaddrs);
	kfree(r->mw_sg);

	rc = ib_dealloc_fmr(r->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       r, rc);
}

/* Deferred reset of a single FMR. Unmapping the FMR ensures that
 * its next mapping is assigned a fresh rkey. There's no recovery
 * if this fails.
 */
static void
__fmr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_mw *mw = container_of(work, struct rpcrdma_mw,
					     mw_work);

	__fmr_reset_and_unmap(mw);
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
static void
__fmr_queue_recovery(struct rpcrdma_mw *mw)
{
	INIT_WORK(&mw->mw_work, __fmr_recovery_worker);
	queue_work(fmr_recovery_wq, &mw->mw_work);
}

static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      RPCRDMA_MAX_FMR_SGES));
	return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}
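
/* Worked example, assuming 4KB pages: one FMR conveys at most
 * 64 * 4KB = 256KB of payload per chunk segment. If, say,
 * RPCRDMA_MAX_HDR_SEGS were 8 and RPCRDMA_MAX_DATA_SEGS were 256
 * (illustrative values, not definitive), fmr_op_maxpages() would
 * return min(256, 8 * 64) = 256 pages, or 1MB of payload per RPC.
 */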

static int
fmr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	struct rpcrdma_mw *r;
	int i, rc;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC:       %s: initializing %d FMRs\n", __func__, i);

	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __fmr_init(r, pd);
		if (rc) {
			kfree(r);
			return rc;
		}

		r->mw_xprt = r_xprt;
		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}
	return 0;
}

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;
	u64 *dma_pages;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	if (!mw) {
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} else {
		/* this is a retransmit; generate a fresh rkey */
		rc = __fmr_unmap(mw);
		if (rc)
			return rc;
	}

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);

	if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
			   mw->mw_sg, mw->mw_nents, mw->mw_dir))
		goto out_dmamap_err;

	for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
		dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
	rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mw->fmr.fm_mr->rkey;
	seg1->mr_base = dma_pages[0] + pageoff;
	seg1->mr_nsegs = mw->mw_nents;
	seg1->mr_len = len;
	return mw->mw_nents;

out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	return -ENOMEM;

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mw->mw_nents, rc);
	__fmr_dma_unmap(mw);
	return rc;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_mw *mw;
	LIST_HEAD(unmap_list);
	int rc;

	dprintk("RPC:       %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped MR.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);

		i += seg->mr_nsegs;
	}
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		pr_warn("%s: ib_unmap_fmr failed (%i)\n", __func__, rc);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_del_init(&mw->fmr.fm_mr->list);
		__fmr_dma_unmap(mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}

	req->rl_nchunks = 0;
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		  bool sync)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int i;

	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		if (sync)
			__fmr_reset_and_unmap(mw);
		else
			__fmr_queue_recovery(mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}
}

static void
fmr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__fmr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_unmap_safe			= fmr_op_unmap_safe,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init			= fmr_op_init,
	.ro_destroy			= fmr_op_destroy,
	.ro_displayname			= "fmr",
};
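
/* Callers reach these methods through the ops vector rather than by
 * name. A minimal sketch, assuming the connect logic has selected
 * this table as ia->ri_ops (field name as declared in xprt_rdma.h):
 *
 *	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 *
 *	n = ia->ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *	...
 *	ia->ri_ops->ro_unmap_sync(r_xprt, req);
 */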