// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Sometimes referred to as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */
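
/* Note (added for context, an assumption about caller behavior):
 * xprtrdma normally prefers FRWR; this FMR mode is typically
 * selected only when fmr_is_supported() (below) finds the FMR
 * verbs and FRWR is not available on the device.
 */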
14
Chuck Leverfc7fbb52015-05-26 11:52:16 -040015/* Normal operation
16 *
17 * A Memory Region is prepared for RDMA READ or WRITE using the
18 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
19 * finished, the Memory Region is unmapped using the ib_unmap_fmr
20 * verb (fmr_op_unmap).
21 */
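
/* Rough per-request flow (a sketch; the ops table at the bottom of
 * this file wires these entry points up):
 *
 *	fmr_op_map()        - ib_dma_map_sg() + ib_map_phys_fmr()
 *	fmr_op_send()       - post the Send WR carrying the RPC Call
 *	fmr_op_unmap_sync() - after the Reply is processed,
 *	                      ib_unmap_fmr() then DMA unmap, returning
 *	                      the MRs to the free list
 */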

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)

/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};

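/* fmr_is_supported() - check that the device provides the FMR verbs.
 *
 * Returns true if the device exposes ->alloc_fmr; otherwise this
 * registration mode cannot be used and a different memreg strategy
 * must be selected.
 */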
bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
	if (!ia->ri_device->alloc_fmr) {
		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
			ia->ri_device->name);
		return false;
	}
	return true;
}

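/* fmr_op_init_mr() - allocate the per-MR resources for FMR mode:
 * a physical-address array, a scatterlist of up to
 * RPCRDMA_MAX_FMR_SGES entries, and the ib_fmr itself.
 */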
static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mr->fmr.fm_physaddrs)
		goto out_free;

	mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mr->mr_sg), GFP_KERNEL);
	if (!mr->mr_sg)
		goto out_free;

	sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);

	mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mr->fmr.fm_mr))
		goto out_fmr_err;

	return 0;

out_fmr_err:
	dprintk("RPC:       %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mr->fmr.fm_mr));

out_free:
	kfree(mr->mr_sg);
	kfree(mr->fmr.fm_physaddrs);
	return -ENOMEM;
}

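/* __fmr_unmap() - invalidate a single FMR by handing it to
 * ib_unmap_fmr() on a temporary one-entry list.
 */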
static int
__fmr_unmap(struct rpcrdma_mr *mr)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mr->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del(&mr->fmr.fm_mr->list);
	return rc;
}

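/* fmr_op_release_mr() - free all resources associated with one MR.
 */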
static void
fmr_op_release_mr(struct rpcrdma_mr *mr)
{
	LIST_HEAD(unmap_list);
	int rc;

	/* Ensure MR is not on any rl_registered list */
	if (!list_empty(&mr->mr_list))
		list_del(&mr->mr_list);

	kfree(mr->fmr.fm_physaddrs);
	kfree(mr->mr_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(mr);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       mr, rc);

	rc = ib_dealloc_fmr(mr->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       mr, rc);

	kfree(mr);
}

/* Reset of a single FMR.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	int rc;

	/* ORDER: invalidate first */
	rc = __fmr_unmap(mr);
	if (rc)
		goto out_release;

	/* ORDER: then DMA unmap */
	rpcrdma_mr_unmap_and_put(mr);

	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr);
	r_xprt->rx_stats.mrs_orphaned++;

	trace_xprtrdma_dma_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	fmr_op_release_mr(mr);
}

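/* fmr_op_open() - set registration-mode-dependent transport
 * parameters; here, how many MR segments are needed to cover a
 * maximum-size payload when each FMR maps up to
 * RPCRDMA_MAX_FMR_SGES pages.
 */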
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				RPCRDMA_MAX_FMR_SGES);
	return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
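/* (For reference, assuming 4 KiB pages: 64 pages is up to 256 KiB
 * per chunk segment; RPCRDMA_MAX_DATA_SEGS still bounds the total.)
 */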
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mr *mr;
	u64 *dma_pages;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		return ERR_PTR(-EAGAIN);

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes: stop gathering if this segment does
		 * not end on a page boundary, or if the next one does
		 * not begin on one, since an FMR maps whole pages and
		 * cannot represent a gap in the middle.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

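	/* DMA map the gathered scatterlist; the resulting bus
	 * addresses are copied into fm_physaddrs below and handed
	 * to ib_map_phys_fmr().
	 */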
	mr->mr_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
				     mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
		dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
	rc = ib_map_phys_fmr(mr->fmr.fm_mr, dma_pages, mr->mr_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	mr->mr_handle = mr->fmr.fm_mr->rkey;
	mr->mr_length = len;
	mr->mr_offset = dma_pages[0] + pageoff;

	*out = mr;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mr->mr_sg, i);
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mr->mr_nents, rc);
	rpcrdma_mr_unmap_and_put(mr);
	return ERR_PTR(-EIO);
}

/* Post Send WR containing the RPC Call message.
 */
static int
fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *bad_wr;

	return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;
	LIST_HEAD(unmap_list);
	int rc;

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	list_for_each_entry(mr, mrs, mr_list) {
		dprintk("RPC:       %s: unmapping fmr %p\n",
			__func__, &mr->fmr);
		trace_xprtrdma_localinv(mr);
		list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
	}
	r_xprt->rx_stats.local_inv_needed++;
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_reset;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MR list.
	 */
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		list_del(&mr->fmr.fm_mr->list);
		rpcrdma_mr_unmap_and_put(mr);
	}

	return;

out_reset:
	pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		list_del(&mr->fmr.fm_mr->list);
		fmr_op_recover_mr(mr);
	}
}

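/* The memory-registration ops vector for "fmr" mode; the transport's
 * ia->ri_ops points here when this strategy is selected.
 */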
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_send			= fmr_op_send,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_recover_mr			= fmr_op_recover_mr,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init_mr			= fmr_op_init_mr,
	.ro_release_mr			= fmr_op_release_mr,
	.ro_displayname			= "fmr",
	.ro_send_w_inv_ok		= 0,
};