/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also referred to sometimes as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif
Chuck Lever1c9351e2015-03-30 14:34:30 -040020/* FRWR mode conveys a list of pages per chunk segment. The
21 * maximum length of that list is the FRWR page list depth.
22 */
23static size_t
24frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
25{
26 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
27
28 return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
29 rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
30}
31
/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Coalesces up to ia->ri_max_frmr_depth page-contiguous segments
 * starting at @seg into one FRMR, DMA-maps each one, and posts a
 * single IB_WR_FAST_REG_MR work request on the connection's QP.
 *
 * Returns the number of segments registered on success (results are
 * returned through @seg's first element), or a negative errno if the
 * post fails. NOTE(review): presumably the caller retries with the
 * remaining segments when fewer than @nsegs are consumed — confirm
 * against the ro_map call sites.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg1 = seg;	/* first segment; carries results */
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct rpcrdma_frmr *frmr = &mw->r.frmr;
	struct ib_mr *mr = frmr->fr_mr;
	struct ib_send_wr fastreg_wr, *bad_wr;
	u8 key;
	int len, pageoff;
	int i, rc;
	int seg_len;
	u64 pa;
	int page_no;

	/* Round the first segment down to a page boundary; the page
	 * list holds whole pages, and "pageoff" is added back into the
	 * advertised base address (iova_start / mr_base) below.
	 */
	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	/* One FRMR can map at most ri_max_frmr_depth segments */
	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
	for (page_no = i = 0; i < nsegs;) {
		rpcrdma_map_one(ia, seg, writing);
		pa = seg->mr_dma;
		/* A segment longer than one page contributes multiple
		 * page-list entries.
		 */
		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
			frmr->fr_pgl->page_list[page_no++] = pa;
			pa += PAGE_SIZE;
		}
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes: stop coalescing at the first segment
		 * that does not abut its neighbor on a page boundary.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	dprintk("RPC: %s: Using frmr %p to map %d segments (%d bytes)\n",
		__func__, mw, i, len);

	/* Mark valid before posting; completion/flush handlers key off
	 * fr_state (reset to INVALID on the error path below).
	 */
	frmr->fr_state = FRMR_IS_VALID;

	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	/* wr_id carries the mw so the send completion can locate it */
	fastreg_wr.wr_id = (unsigned long)(void *)mw;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff;
	fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.page_list_len = page_no;
	fastreg_wr.wr.fast_reg.length = len;
	/* Grant the peer only the access the direction requires */
	fastreg_wr.wr.fast_reg.access_flags = writing ?
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
				IB_ACCESS_REMOTE_READ;
	/* Bump the low-order "key" byte of the rkey so each reuse of
	 * this MR presents a fresh rkey to the remote peer.
	 */
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
	if (rc)
		goto out_senderr;

	/* Publish the registered region's coordinates via seg1 */
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = seg1->mr_dma + pageoff;
	seg1->mr_nsegs = i;
	seg1->mr_len = len;
	return i;

out_senderr:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	/* Roll back: restore the previous rkey byte, mark the FRMR
	 * invalid again, and DMA-unmap every segment mapped above
	 * (seg was advanced in the loop, so walk it back).
	 */
	ib_update_fast_reg_key(mr, --key);
	frmr->fr_state = FRMR_IS_INVALID;
	while (i--)
		rpcrdma_unmap_one(ia, --seg);
	return rc;
}
112
Chuck Levera0ce85f2015-03-30 14:34:21 -0400113const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
Chuck Lever9c1b4d72015-03-30 14:34:39 -0400114 .ro_map = frwr_op_map,
Chuck Lever1c9351e2015-03-30 14:34:30 -0400115 .ro_maxpages = frwr_op_maxpages,
Chuck Levera0ce85f2015-03-30 14:34:21 -0400116 .ro_displayname = "frwr",
117};