\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -040038 */
39
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -040040/*
41 * verbs.c
42 *
43 * Encapsulates the major functions managing:
44 * o adapters
45 * o endpoints
46 * o connections
47 * o buffer memory
48 */
49
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

/*
 * internal functions
 */

/*
 * handle replies in tasklet context, using a single, global list
 * rdma tasklet function -- just turn around and call the func
 * for all replies on the list
 */

static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
static LIST_HEAD(rpcrdma_tasklets_g);

static void
rpcrdma_run_tasklet(unsigned long data)
{
	struct rpcrdma_rep *rep;
	unsigned long flags;

	data = data;
	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		rpcrdma_reply_handler(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}

static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);

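/* Splice a list of completed receives onto the global tasklet list
 * and kick the reply-handling tasklet.
 */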
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400113static void
114rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
115{
116 struct rpcrdma_ep *ep = context;
117
Chuck Lever7ff11de2014-11-08 20:15:01 -0500118 pr_err("RPC: %s: %s on device %s ep %p\n",
Sagi Grimberg76357c72015-05-18 13:40:32 +0300119 __func__, ib_event_msg(event->event),
Chuck Lever7ff11de2014-11-08 20:15:01 -0500120 event->device->name, context);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400121 if (ep->rep_connected == 1) {
122 ep->rep_connected = -EIO;
Chuck Leverafadc462015-01-21 11:03:11 -0500123 rpcrdma_conn_func(ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400124 wake_up_all(&ep->rep_connect_wait);
125 }
126}
127
128static void
129rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
130{
131 struct rpcrdma_ep *ep = context;
132
Chuck Lever7ff11de2014-11-08 20:15:01 -0500133 pr_err("RPC: %s: %s on device %s ep %p\n",
Sagi Grimberg76357c72015-05-18 13:40:32 +0300134 __func__, ib_event_msg(event->event),
Chuck Lever7ff11de2014-11-08 20:15:01 -0500135 event->device->name, context);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400136 if (ep->rep_connected == 1) {
137 ep->rep_connected = -EIO;
Chuck Leverafadc462015-01-21 11:03:11 -0500138 rpcrdma_conn_func(ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400139 wake_up_all(&ep->rep_connect_wait);
140 }
141}
142
Chuck Leverfc664482014-05-28 10:33:25 -0400143static void
144rpcrdma_sendcq_process_wc(struct ib_wc *wc)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400145{
Chuck Lever85024272015-01-21 11:02:04 -0500146 /* WARNING: Only wr_id and status are reliable at this point */
Chuck Levere46ac342015-03-30 14:35:35 -0400147 if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) {
148 if (wc->status != IB_WC_SUCCESS &&
149 wc->status != IB_WC_WR_FLUSH_ERR)
Chuck Lever85024272015-01-21 11:02:04 -0500150 pr_err("RPC: %s: SEND: %s\n",
Sagi Grimberg76357c72015-05-18 13:40:32 +0300151 __func__, ib_wc_status_msg(wc->status));
Chuck Lever85024272015-01-21 11:02:04 -0500152 } else {
153 struct rpcrdma_mw *r;
154
155 r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
Chuck Levere46ac342015-03-30 14:35:35 -0400156 r->mw_sendcompletion(wc);
Chuck Lever85024272015-01-21 11:02:04 -0500157 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400158}
159
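/* Poll the send CQ in batches of RPCRDMA_POLLSIZE completions, up to
 * a fixed budget, handing each completion to rpcrdma_sendcq_process_wc.
 */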
static int
rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_send_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_sendcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}

/*
 * Handle send, fast_reg_mr, and local_inv completions.
 *
 * Send events are typically suppressed and thus do not result
 * in an upcall. Occasionally one is signaled, however. This
 * prevents the provider's completion queue from wrapping and
 * losing a completion.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_sendcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC: %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_sendcq_poll(cq, ep);
}

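/* Examine one receive completion.  A successful RECV is synced for
 * the CPU and queued on sched_list for the reply tasklet; a failed
 * completion is queued the same way with rr_len set to ~0U.
 */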
static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rep->rr_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);
	prefetch(rdmab_to_msg(rep->rr_rdmabuf));

out_schedule:
	list_add_tail(&rep->rr_list, sched_list);
	return;
out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("RPC: %s: rep %p: %s\n",
		       __func__, rep, ib_wc_status_msg(wc->status));
	rep->rr_len = ~0U;
	goto out_schedule;
}

static int
rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct list_head sched_list;
	struct ib_wc *wcs;
	int budget, count, rc;

	INIT_LIST_HEAD(&sched_list);
	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_recv_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			goto out_schedule;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++, &sched_list);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	rc = 0;

out_schedule:
	rpcrdma_schedule_tasklet(&sched_list);
	return rc;
}

/*
 * Handle receive completions.
 *
 * It is reentrant but processes single events in order to maintain
 * ordering of receives to keep server credits.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_recvcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC: %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_recvcq_poll(cq, ep);
}

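/* Drain both completion queues: completed receives are handed to the
 * reply tasklet, completed sends are processed in place.
 */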
static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
	struct ib_wc wc;
	LIST_HEAD(sched_list);

	while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
		rpcrdma_recvcq_process_wc(&wc, &sched_list);
	if (!list_empty(&sched_list))
		rpcrdma_schedule_tasklet(&sched_list);
	while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
		rpcrdma_sendcq_process_wc(&wc);
}

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC: %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC: %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_device->name,
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}

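/* Create an rdma_cm ID for this connection, then synchronously resolve
 * the server's address and route, waiting up to RDMA_RESOLVE_TIMEOUT
 * for each asynchronous step to complete.
 */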
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC: %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int count = 0;

	while (1 == ib_poll_cq(cq, 1, &wc))
		++count;

	if (count)
		dprintk("RPC: %s: flushed %d events (last 0x%x)\n",
			__func__, count, wc.opcode);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct ib_device_attr *devattr = &ia->ri_devattr;
	int rc;

	ia->ri_dma_mr = NULL;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_query_device(ia->ri_device, devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out3;
	}

	if (memreg == RPCRDMA_FRMR) {
		/* Requires both frmr reg and local dma lkey */
		if (((devattr->device_cap_flags &
		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) ||
		      (devattr->max_fast_reg_page_list_len == 0)) {
			dprintk("RPC: %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_device->alloc_fmr) {
			dprintk("RPC: %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
			goto out3;
		}
	}

	switch (memreg) {
	case RPCRDMA_FRMR:
		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
		break;
	case RPCRDMA_ALLPHYSICAL:
		ia->ri_ops = &rpcrdma_physical_memreg_ops;
		break;
	case RPCRDMA_MTHCAFMR:
		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out3;
	}
	dprintk("RPC: %s: memory registration strategy is '%s'\n",
		__func__, ia->ri_ops->ro_displayname);

	rwlock_init(&ia->ri_qplock);
	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

/*
 * Clean up/close an IA.
 *  o if event handles and PD have been initialized, free them.
 *  o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC: %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		WARN_ON(ib_dealloc_pd(ia->ri_pd));
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
				struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	struct ib_cq *sendcq, *recvcq;
	struct ib_cq_init_attr cq_attr = {};
	int rc, err;

	if (devattr->max_sge < RPCRDMA_MAX_IOVS) {
		dprintk("RPC: %s: insufficient sge's available\n",
			__func__);
		return -ENOMEM;
	}

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > devattr->max_qp_wr)
		cdata->max_requests = devattr->max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS)
		ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS;
	else if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;
	INIT_CQCOUNT(ep);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1;
	sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep, &cq_attr);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
	recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep, &cq_attr);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		ib_destroy_cq(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (devattr->max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						devattr->max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(sendcq);
	if (err)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	if (ia->ri_dma_mr)
		ib_dereg_mr(ia->ri_dma_mr);
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC: %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
	if (rc)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	rpcrdma_clean_cq(ep->rep_attr.send_cq);
	rc = ib_destroy_cq(ep->rep_attr.send_cq);
	if (rc)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	if (ia->ri_dma_mr) {
		rc = ib_dereg_mr(ia->ri_dma_mr);
		dprintk("RPC: %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC: %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);
		rpcrdma_flush_cqs(ep);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_device != id->device) {
			printk("RPC: %s: can't reconnect on "
				"different device!\n", __func__);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		write_lock(&ia->ri_qplock);
		old = ia->ri_id;
		ia->ri_id = id;
		write_unlock(&ia->ri_qplock);

		rdma_destroy_qp(old);
		rdma_destroy_id(old);
	} else {
		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC: %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition which has already
	 * undergone a best-effort.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		dprintk("RPC: %s: connected\n", __func__);
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_flush_cqs(ep);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
						ep->rep_connected != 1);
		dprintk("RPC: %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
}

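/* Allocate a request tracking structure and point it at its
 * owning buffer pool.
 */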
static struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->rl_buffer = &r_xprt->rx_buf;
	return req;
}

static struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
					       GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_device = ia->ri_device;
	rep->rr_rxprt = r_xprt;
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	char *p;
	size_t len;
	int i, rc;

	buf->rb_max_requests = cdata->max_requests;
	spin_lock_init(&buf->rb_lock);

	/* Need to allocate:
	 *   1. arrays for send and recv pointers
	 *   2. arrays of struct rpcrdma_req to fill in pointers
	 *   3. array of struct rpcrdma_rep for replies
	 * Send/recv buffers in req/rep need to be registered
	 */
	len = buf->rb_max_requests *
		(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));

	p = kzalloc(len, GFP_KERNEL);
	if (p == NULL) {
		dprintk("RPC: %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
			__func__, len);
		rc = -ENOMEM;
		goto out;
	}
	buf->rb_pool = p;	/* for freeing it later */

	buf->rb_send_bufs = (struct rpcrdma_req **) p;
	p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
	buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
	p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];

	rc = ia->ri_ops->ro_init(r_xprt);
	if (rc)
		goto out;

	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;
		struct rpcrdma_rep *rep;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC: %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		buf->rb_send_bufs[i] = req;

		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			dprintk("RPC: %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = PTR_ERR(rep);
			goto out;
		}
		buf->rb_recv_bufs[i] = rep;
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
{
	if (!rep)
		return;

	rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
	kfree(rep);
}

static void
rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	if (!req)
		return;

	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
	kfree(req);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	int i;

	/* clean up in reverse order from create
	 *   1. recv mr memory (mr free, then kfree)
	 *   2. send mr memory (mr free, then kfree)
	 *   3. MWs
	 */
	dprintk("RPC: %s: entering\n", __func__);

	for (i = 0; i < buf->rb_max_requests; i++) {
		if (buf->rb_recv_bufs)
			rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]);
		if (buf->rb_send_bufs)
			rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]);
	}

	ia->ri_ops->ro_destroy(buf);

	kfree(buf->rb_pool);
}

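/* Pop the next unused MW off the buffer pool's rb_mws list;
 * returns NULL (and logs an error) if the list is empty.
 */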
struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mw *mw = NULL;

	spin_lock(&buf->rb_mwlock);
	if (!list_empty(&buf->rb_mws)) {
		mw = list_first_entry(&buf->rb_mws,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
	}
	spin_unlock(&buf->rb_mwlock);

	if (!mw)
		pr_err("RPC: %s: no MWs available\n", __func__);
	return mw;
}

void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_mwlock);
	list_add_tail(&mw->mw_list, &buf->rb_mws);
	spin_unlock(&buf->rb_mwlock);
}

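/* Return a request, and any rpcrdma_rep attached to it, to the
 * buffer pool.  The caller serializes access via rb_lock.
 */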
static void
rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	buf->rb_send_bufs[--buf->rb_send_index] = req;
	req->rl_niovs = 0;
	if (req->rl_reply) {
		buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
		req->rl_reply = NULL;
	}
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if needed) is attached to send buffer upon return.
 * Rule:
 *    rb_send_index and rb_recv_index MUST always be pointing to the
 *    *next* available buffer (non-NULL). They are incremented after
 *    removing buffers, and decremented *before* returning them.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);

	if (buffers->rb_send_index == buffers->rb_max_requests) {
		spin_unlock_irqrestore(&buffers->rb_lock, flags);
		dprintk("RPC: %s: out of request buffers\n", __func__);
		return ((struct rpcrdma_req *)NULL);
	}

	req = buffers->rb_send_bufs[buffers->rb_send_index];
	if (buffers->rb_send_index < buffers->rb_recv_index) {
		dprintk("RPC: %s: %d extra receives outstanding (ok)\n",
			__func__,
			buffers->rb_recv_index - buffers->rb_send_index);
		req->rl_reply = NULL;
	} else {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;

	spin_unlock_irqrestore(&buffers->rb_lock, flags);
	return req;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	rpcrdma_buffer_put_sendbuf(req, buffers);
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from error conditions.
 * Post-increment counter/array index.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_recv_index < buffers->rb_max_requests) {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

void
rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
{
	dprintk("RPC: map_one: offset %p iova %llx len %zu\n",
		seg->mr_offset,
		(unsigned long long)seg->mr_dma, seg->mr_dmalen);
}

/**
 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
 * @ia: controlling rpcrdma_ia
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns pointer to private header of an area of internally
 * registered memory, or an ERR_PTR. The registered buffer follows
 * the end of the private header.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. regbufs are not
 * used for RDMA READ/WRITE operations, thus are registered only for
 * LOCAL access.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;
	struct ib_sge *iov;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		goto out;

	iov = &rb->rg_iov;
	iov->addr = ib_dma_map_single(ia->ri_device,
				      (void *)rb->rg_base, size,
				      DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(ia->ri_device, iov->addr))
		goto out_free;

	iov->length = size;
	iov->lkey = ia->ri_dma_lkey;
	rb->rg_size = size;
	rb->rg_owner = NULL;
	return rb;

out_free:
	kfree(rb);
out:
	return ERR_PTR(-ENOMEM);
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	struct ib_sge *iov;

	if (!rb)
		return;

	iov = &rb->rg_iov;
	ib_dma_unmap_single(ia->ri_device,
			    iov->addr, iov->length, DMA_BIDIRECTIONAL);
	kfree(rb);
}

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_device *device = ia->ri_device;
	struct ib_send_wr send_wr, *send_wr_fail;
	struct rpcrdma_rep *rep = req->rl_reply;
	struct ib_sge *iov = req->rl_send_iov;
	int i, rc;

	if (rep) {
		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out;
		req->rl_reply = NULL;
	}

	send_wr.next = NULL;
	send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
	send_wr.sg_list = iov;
	send_wr.num_sge = req->rl_niovs;
	send_wr.opcode = IB_WR_SEND;

	for (i = 0; i < send_wr.num_sge; i++)
		ib_dma_sync_single_for_device(device, iov[i].addr,
					      iov[i].length, DMA_TO_DEVICE);
	dprintk("RPC: %s: posting %d s/g entries\n",
		__func__, send_wr.num_sge);

	if (DECR_CQCOUNT(ep) > 0)
		send_wr.send_flags = 0;
	else { /* Provider must take a send completion every now and then */
		INIT_CQCOUNT(ep);
		send_wr.send_flags = IB_SEND_SIGNALED;
	}

	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
	if (rc)
		dprintk("RPC: %s: ib_post_send returned %i\n", __func__,
			rc);
out:
	return rc;
}

/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_ep *ep,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr recv_wr, *recv_wr_fail;
	int rc;

	recv_wr.next = NULL;
	recv_wr.wr_id = (u64) (unsigned long) rep;
	recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	recv_wr.num_sge = 1;

	ib_dma_sync_single_for_cpu(ia->ri_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rdmab_length(rep->rr_rdmabuf),
				   DMA_BIDIRECTIONAL);

	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);

	if (rc)
		dprintk("RPC: %s: ib_post_recv returned %i\n", __func__,
			rc);
	return rc;
}

/* How many chunk list items fit within our inline buffers?
 */
unsigned int
rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	int bytes, segments;

	bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
	bytes -= RPCRDMA_HDRLEN_MIN;
	if (bytes < sizeof(struct rpcrdma_segment) * 2) {
		pr_warn("RPC: %s: inline threshold too small\n",
			__func__);
		return 0;
	}

	segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
	dprintk("RPC: %s: max chunk list size = %d segments\n",
		__func__, segments);
	return segments;
}