/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Sometimes referred to as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */

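/* A sketch of the expected call flow. The call sites live in the
 * generic xprtrdma code, not in this file, so treat this as
 * orientation rather than a contract:
 *
 *      rpcrdma_marshal_req
 *        -> ro_map (fmr_op_map), once per chunk segment
 *      ... RPC Call is transmitted, RPC Reply arrives ...
 *      rpcrdma_reply_handler
 *        -> ro_unmap_sync (fmr_op_unmap_sync), which fences all of
 *           the request's MRs before the RPC is completed
 */
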
#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES (64)
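
/* Worked example: with 4KB pages (PAGE_SHIFT == 12), a single FMR
 * can map up to 64 * 4KB = 256KB of payload. Platforms with larger
 * pages convey proportionally more per chunk segment.
 */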

/* Access mode of externally registered pages */
enum {
        RPCRDMA_FMR_ACCESS_FLAGS = IB_ACCESS_REMOTE_WRITE |
                                   IB_ACCESS_REMOTE_READ,
};

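/* fmr_is_supported - check whether the underlying device implements
 * the FMR verbs. A device that does not provide ->alloc_fmr cannot
 * use this registration mode; transport setup is then expected to
 * choose another strategy or fail.
 */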
bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
        if (!ia->ri_device->alloc_fmr) {
                pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
                        ia->ri_device->name);
                return false;
        }
        return true;
}

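/* fmr_op_init_mr - allocate the resources behind one rpcrdma_mw: an
 * array of physical addresses, a scatterlist for DMA mapping, and
 * the hardware FMR itself. A max_maps of 1 reflects that each FMR
 * is unmapped after every use before it is mapped again.
 */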
static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
{
        static struct ib_fmr_attr fmr_attr = {
                .max_pages      = RPCRDMA_MAX_FMR_SGES,
                .max_maps       = 1,
                .page_shift     = PAGE_SHIFT
        };

        mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
                                       sizeof(u64), GFP_KERNEL);
        if (!mw->fmr.fm_physaddrs)
                goto out_free;

        mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
                            sizeof(*mw->mw_sg), GFP_KERNEL);
        if (!mw->mw_sg)
                goto out_free;

        sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

        mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
                                     &fmr_attr);
        if (IS_ERR(mw->fmr.fm_mr))
                goto out_fmr_err;

        return 0;

out_fmr_err:
        dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
                PTR_ERR(mw->fmr.fm_mr));

out_free:
        kfree(mw->mw_sg);
        kfree(mw->fmr.fm_physaddrs);
        return -ENOMEM;
}

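/* The ib_unmap_fmr verb takes a list of FMRs so that drivers can
 * batch invalidations. To unmap just one FMR, place it on a local
 * single-entry list for the duration of the call.
 */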
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
        LIST_HEAD(l);
        int rc;

        list_add(&mw->fmr.fm_mr->list, &l);
        rc = ib_unmap_fmr(&l);
        list_del_init(&mw->fmr.fm_mr->list);
        return rc;
}

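/* fmr_op_release_mr - free one rpcrdma_mw and its hardware FMR.
 * The FMR is unmapped first because ib_dealloc_fmr can fail with
 * EBUSY while a mapping is still live.
 */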
static void
fmr_op_release_mr(struct rpcrdma_mw *r)
{
        int rc;

        /* Ensure MW is not on any rl_registered list */
        if (!list_empty(&r->mw_list))
                list_del(&r->mw_list);

        kfree(r->fmr.fm_physaddrs);
        kfree(r->mw_sg);

        /* In case this one was left mapped, try to unmap it
         * to prevent dealloc_fmr from failing with EBUSY
         */
        rc = __fmr_unmap(r);
        if (rc)
                pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
                       r, rc);

        rc = ib_dealloc_fmr(r->fmr.fm_mr);
        if (rc)
                pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
                       r, rc);

        kfree(r);
}

/* Reset of a single FMR.
 *
 * Invoked when an FMR could not be invalidated on the normal path,
 * typically from the MR recovery worker (via ro_recover_mr) after a
 * registration or invalidation failure was deferred.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mw *mw)
{
        struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
        int rc;

        /* ORDER: invalidate first */
        rc = __fmr_unmap(mw);

        /* ORDER: then DMA unmap */
        ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
                        mw->mw_sg, mw->mw_nents, mw->mw_dir);
        if (rc)
                goto out_release;

        rpcrdma_put_mw(r_xprt, mw);
        r_xprt->rx_stats.mrs_recovered++;
        return;

out_release:
        pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
        r_xprt->rx_stats.mrs_orphaned++;

        spin_lock(&r_xprt->rx_buf.rb_mwlock);
        list_del(&mw->mw_all);
        spin_unlock(&r_xprt->rx_buf.rb_mwlock);

        fmr_op_release_mr(mw);
}

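/* fmr_op_open - set transport parameters for FMR mode.
 *
 * ri_max_segs caps the number of chunk segments (and thus MRs) that
 * one RPC may consume. Illustrative arithmetic only: if
 * RPCRDMA_MAX_DATA_SEGS were 256 (a 1MB payload in 4KB pages),
 * ri_max_segs would be 256 / 64 = 4.
 */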
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
            struct rpcrdma_create_data_internal *cdata)
{
        ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
                                RPCRDMA_MAX_FMR_SGES);
        return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 * The result is capped both by the transport's payload limit and
 * by the number of segments the RPC-over-RDMA header can describe.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
           int nsegs, bool writing, struct rpcrdma_mw **out)
{
        struct rpcrdma_mr_seg *seg1 = seg;
        int len, pageoff, i, rc;
        struct rpcrdma_mw *mw;
        u64 *dma_pages;

        mw = rpcrdma_get_mw(r_xprt);
        if (!mw)
                return -ENOBUFS;

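        /* Adjust the first segment to start on a page boundary (an
         * FMR maps whole pages). Starting "len" at -pageoff means
         * that once the loop below has added each segment's mr_len,
         * including the first segment's inflated length, "len" holds
         * the number of payload bytes actually being registered.
         */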
        pageoff = offset_in_page(seg1->mr_offset);
        seg1->mr_offset -= pageoff;     /* start of page */
        seg1->mr_len += pageoff;
        len = -pageoff;
        if (nsegs > RPCRDMA_MAX_FMR_SGES)
                nsegs = RPCRDMA_MAX_FMR_SGES;
        for (i = 0; i < nsegs;) {
                if (seg->mr_page)
                        sg_set_page(&mw->mw_sg[i],
                                    seg->mr_page,
                                    seg->mr_len,
                                    offset_in_page(seg->mr_offset));
                else
                        sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
                                   seg->mr_len);
                len += seg->mr_len;
                ++seg;
                ++i;
                /* Check for holes: an FMR registers one virtually
                 * contiguous extent, so coalescing must stop at the
                 * first segment that ends in the middle of a page or
                 * the first that starts past a page boundary.
                 */
                if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                        break;
        }
        mw->mw_nents = i;
        mw->mw_dir = rpcrdma_data_dir(writing);
        if (i == 0)
                goto out_dmamap_err;

        if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
                           mw->mw_sg, mw->mw_nents, mw->mw_dir))
                goto out_dmamap_err;

        for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
                dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
        rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
                             dma_pages[0]);
        if (rc)
                goto out_maperr;

        mw->mw_handle = mw->fmr.fm_mr->rkey;
        mw->mw_length = len;
        mw->mw_offset = dma_pages[0] + pageoff;

        *out = mw;
        return mw->mw_nents;

out_dmamap_err:
        pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
               mw->mw_sg, mw->mw_nents);
        rpcrdma_defer_mr_recovery(mw);
        return -EIO;

out_maperr:
        pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
               len, (unsigned long long)dma_pages[0],
               pageoff, mw->mw_nents, rc);
        rpcrdma_defer_mr_recovery(mw);
        return -EIO;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that req->rl_registered is not empty.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct rpcrdma_mw *mw, *tmp;
        LIST_HEAD(unmap_list);
        int rc;

        dprintk("RPC: %s: req %p\n", __func__, req);

        /* ORDER: Invalidate all of the req's MRs first
         *
         * ib_unmap_fmr() is slow, so use a single call instead
         * of one call per mapped FMR.
         */
        list_for_each_entry(mw, &req->rl_registered, mw_list)
                list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
        r_xprt->rx_stats.local_inv_needed++;
        rc = ib_unmap_fmr(&unmap_list);
        if (rc)
                goto out_reset;

        /* ORDER: Now DMA unmap all of the req's MRs, and return
         * them to the free MW list.
         */
        list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
                list_del_init(&mw->mw_list);
                list_del_init(&mw->fmr.fm_mr->list);
                ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
                                mw->mw_sg, mw->mw_nents, mw->mw_dir);
                rpcrdma_put_mw(r_xprt, mw);
        }

        return;

out_reset:
        pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

        list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
                list_del_init(&mw->fmr.fm_mr->list);
                fmr_op_recover_mr(mw);
        }
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req". When "sync" is true each MR
 * is recovered inline; otherwise recovery is deferred to the
 * MR recovery worker.
 */
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                  bool sync)
{
        struct rpcrdma_mw *mw;

        while (!list_empty(&req->rl_registered)) {
                mw = rpcrdma_pop_mw(&req->rl_registered);
                if (sync)
                        fmr_op_recover_mr(mw);
                else
                        rpcrdma_defer_mr_recovery(mw);
        }
}

const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
        .ro_map                 = fmr_op_map,
        .ro_unmap_sync          = fmr_op_unmap_sync,
        .ro_unmap_safe          = fmr_op_unmap_safe,
        .ro_recover_mr          = fmr_op_recover_mr,
        .ro_open                = fmr_op_open,
        .ro_maxpages            = fmr_op_maxpages,
        .ro_init_mr             = fmr_op_init_mr,
        .ro_release_mr          = fmr_op_release_mr,
        .ro_displayname         = "fmr",
        .ro_send_w_inv_ok       = 0,
};
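
/* Note: which memreg_ops vector the transport uses is decided at
 * connect time by the generic xprtrdma code (rpcrdma_ia_open); the
 * sketch below approximates that selection, with "fmr" typically
 * serving as the fallback when FRWR is unsupported by the device:
 *
 *      if (frwr_is_supported(ia))
 *              ia->ri_ops = &rpcrdma_frwr_memreg_ops;
 *      else if (fmr_is_supported(ia))
 *              ia->ri_ops = &rpcrdma_fmr_memreg_ops;
 */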