\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -040038 */
39
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -040040/*
41 * verbs.c
42 *
43 * Encapsulates the major functions managing:
44 * o adapters
45 * o endpoints
46 * o connections
47 * o buffer memory
48 */
49
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000050#include <linux/interrupt.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090051#include <linux/slab.h>
Chuck Levereba8ff62015-01-21 11:03:02 -050052#include <linux/prefetch.h>
Chuck Lever65866f82014-05-28 10:33:59 -040053#include <asm/bitops.h>
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -040054
\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -040055#include "xprt_rdma.h"
56
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -040057/*
58 * Globals/Macros
59 */
60
Jeff Laytonf895b252014-11-17 16:58:04 -050061#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -040062# define RPCDBG_FACILITY RPCDBG_TRANS
63#endif
64
Chuck Lever9f9d8022014-07-29 17:24:45 -040065static void rpcrdma_reset_frmrs(struct rpcrdma_ia *);
Chuck Lever467c9672014-11-08 20:14:29 -050066static void rpcrdma_reset_fmrs(struct rpcrdma_ia *);
Chuck Lever9f9d8022014-07-29 17:24:45 -040067
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -040068/*
69 * internal functions
70 */
71
72/*
73 * handle replies in tasklet context, using a single, global list
74 * rdma tasklet function -- just turn around and call the func
75 * for all replies on the list
76 */
77
78static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
79static LIST_HEAD(rpcrdma_tasklets_g);
80
81static void
82rpcrdma_run_tasklet(unsigned long data)
83{
84 struct rpcrdma_rep *rep;
85 void (*func)(struct rpcrdma_rep *);
86 unsigned long flags;
87
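	/* The "data" argument is unused; the self-assignment below
	 * simply quiets compiler warnings about the unused parameter. */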
	data = data;
	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		func = rep->rr_func;
		rep->rr_func = NULL;
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		if (func)
			func(rep);
		else
			rpcrdma_recv_buffer_put(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}

static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);

static const char * const async_event[] = {
	"CQ error",
	"QP fatal error",
	"QP request error",
	"QP access error",
	"communication established",
	"send queue drained",
	"path migration successful",
	"path mig error",
	"device fatal error",
	"port active",
	"port error",
	"LID change",
	"P_key change",
	"SM change",
	"SRQ error",
	"SRQ limit reached",
	"last WQE reached",
	"client reregister",
	"GID change",
};

#define ASYNC_MSG(status)					\
	((status) < ARRAY_SIZE(async_event) ?			\
		async_event[(status)] : "unknown async error")
135
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400136static void
Chuck Leverf1a03b72014-11-08 20:14:37 -0500137rpcrdma_schedule_tasklet(struct list_head *sched_list)
138{
139 unsigned long flags;
140
141 spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
142 list_splice_tail(sched_list, &rpcrdma_tasklets_g);
143 spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
144 tasklet_schedule(&rpcrdma_tasklet_g);
145}
146
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400147static void
148rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
149{
150 struct rpcrdma_ep *ep = context;
151
Chuck Lever7ff11de2014-11-08 20:15:01 -0500152 pr_err("RPC: %s: %s on device %s ep %p\n",
153 __func__, ASYNC_MSG(event->event),
154 event->device->name, context);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400155 if (ep->rep_connected == 1) {
156 ep->rep_connected = -EIO;
Chuck Leverafadc462015-01-21 11:03:11 -0500157 rpcrdma_conn_func(ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400158 wake_up_all(&ep->rep_connect_wait);
159 }
160}
161
162static void
163rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
164{
165 struct rpcrdma_ep *ep = context;
166
Chuck Lever7ff11de2014-11-08 20:15:01 -0500167 pr_err("RPC: %s: %s on device %s ep %p\n",
168 __func__, ASYNC_MSG(event->event),
169 event->device->name, context);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400170 if (ep->rep_connected == 1) {
171 ep->rep_connected = -EIO;
Chuck Leverafadc462015-01-21 11:03:11 -0500172 rpcrdma_conn_func(ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400173 wake_up_all(&ep->rep_connect_wait);
174 }
175}
176
Chuck Lever85024272015-01-21 11:02:04 -0500177static const char * const wc_status[] = {
178 "success",
179 "local length error",
180 "local QP operation error",
181 "local EE context operation error",
182 "local protection error",
183 "WR flushed",
184 "memory management operation error",
185 "bad response error",
186 "local access error",
187 "remote invalid request error",
188 "remote access error",
189 "remote operation error",
190 "transport retry counter exceeded",
191 "RNR retrycounter exceeded",
192 "local RDD violation error",
193 "remove invalid RD request",
194 "operation aborted",
195 "invalid EE context number",
196 "invalid EE context state",
197 "fatal error",
198 "response timeout error",
199 "general error",
200};
201
202#define COMPLETION_MSG(status) \
203 ((status) < ARRAY_SIZE(wc_status) ? \
204 wc_status[(status)] : "unexpected completion error")
205
Chuck Leverfc664482014-05-28 10:33:25 -0400206static void
207rpcrdma_sendcq_process_wc(struct ib_wc *wc)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400208{
Chuck Lever85024272015-01-21 11:02:04 -0500209 if (likely(wc->status == IB_WC_SUCCESS))
Chuck Leverfc664482014-05-28 10:33:25 -0400210 return;
Chuck Lever85024272015-01-21 11:02:04 -0500211
212 /* WARNING: Only wr_id and status are reliable at this point */
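	/* A wr_id of zero identifies an ordinary SEND; any other value
	 * is the rpcrdma_mw pointer that was posted with a fast_reg or
	 * local_inv WR, whose FRMR must now be treated as stale. */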
	if (wc->wr_id == 0ULL) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("RPC: %s: SEND: %s\n",
			       __func__, COMPLETION_MSG(wc->status));
	} else {
		struct rpcrdma_mw *r;

		r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
		r->r.frmr.fr_state = FRMR_IS_STALE;
		pr_err("RPC: %s: frmr %p (stale): %s\n",
		       __func__, r, COMPLETION_MSG(wc->status));
	}
}

static int
rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
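	/* Poll in batches of RPCRDMA_POLLSIZE completions, stopping
	 * after roughly RPCRDMA_WC_BUDGET of them, so a busy CQ cannot
	 * monopolize this context indefinitely. */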
	do {
		wcs = ep->rep_send_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_sendcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}

/*
 * Handle send, fast_reg_mr, and local_inv completions.
 *
 * Send events are typically suppressed and thus do not result
 * in an upcall. Occasionally one is signaled, however. This
 * prevents the provider's completion queue from wrapping and
 * losing a completion.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_sendcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC: %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

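	/* Re-arm notification. With IB_CQ_REPORT_MISSED_EVENTS, a
	 * return of zero means no completions arrived while the CQ was
	 * un-armed; a positive return means some may have been missed,
	 * so poll once more below. */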
	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_sendcq_poll(cq, ep);
}

static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);
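	/* The prefetch is only a hint: it likely warms the cache with
	 * the start of the reply header before the tasklet parses it,
	 * but correctness does not depend on it. */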
	prefetch(rdmab_to_msg(rep->rr_rdmabuf));

out_schedule:
	list_add_tail(&rep->rr_list, sched_list);
	return;
out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("RPC: %s: rep %p: %s\n",
		       __func__, rep, COMPLETION_MSG(wc->status));
	rep->rr_len = ~0U;
	goto out_schedule;
}

static int
rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct list_head sched_list;
	struct ib_wc *wcs;
	int budget, count, rc;

	INIT_LIST_HEAD(&sched_list);
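	/* Completed receives are gathered on this private list and
	 * handed to the reply tasklet in a single splice, so the
	 * global tasklet lock is taken once per poll, not per WC. */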
	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_recv_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			goto out_schedule;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++, &sched_list);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	rc = 0;

out_schedule:
	rpcrdma_schedule_tasklet(&sched_list);
	return rc;
}

/*
 * Handle receive completions.
 *
 * It is reentrant but processes single events in order to maintain
 * ordering of receives to keep server credits.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_recvcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC: %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_recvcq_poll(cq, ep);
}

static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
	struct ib_wc wc;
	LIST_HEAD(sched_list);

	while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
		rpcrdma_recvcq_process_wc(&wc, &sched_list);
	if (!list_empty(&sched_list))
		rpcrdma_schedule_tasklet(&sched_list);
	while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
		rpcrdma_sendcq_process_wc(&wc);
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char * const conn[] = {
	"address resolved",
	"address error",
	"route resolved",
	"route error",
	"connect request",
	"connect response",
	"connect error",
	"unreachable",
	"rejected",
	"established",
	"disconnected",
	"device removal",
	"multicast join",
	"multicast error",
	"address change",
	"timewait exit",
};

#define CONNECTION_MSG(status)					\
	((status) < ARRAY_SIZE(conn) ?				\
		conn[(status)] : "unrecognized connection error")
#endif

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC: %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC: %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC: %s: %pI4:%u (ep 0x%p): %s\n",
			__func__, &addr->sin_addr.s_addr,
			ntohs(addr->sin_port), ep,
			CONNECTION_MSG(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;
		printk(KERN_INFO "rpcrdma: connection to %pI4:%u "
			"on %s, memreg %d slots %d ird %d%s\n",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			ia->ri_id->device->name,
			ia->ri_memreg_strategy,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		printk(KERN_INFO "rpcrdma: connection to %pI4:%u closed (%d)\n",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			connstate);
	}
#endif

	return 0;
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC: %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
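	/* If the CM never delivers an address-resolution upcall to
	 * overwrite it, the timed wait below reports this -ETIMEDOUT. */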
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400531 rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
532 if (rc) {
533 dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
534 __func__, rc);
535 goto out;
536 }
Tom Talpey5675add2008-10-09 15:01:41 -0400537 wait_for_completion_interruptible_timeout(&ia->ri_done,
538 msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400539 rc = ia->ri_async_rc;
540 if (rc)
541 goto out;
542
Tom Talpey5675add2008-10-09 15:01:41 -0400543 ia->ri_async_rc = -ETIMEDOUT;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400544 rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
545 if (rc) {
546 dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
547 __func__, rc);
548 goto out;
549 }
Tom Talpey5675add2008-10-09 15:01:41 -0400550 wait_for_completion_interruptible_timeout(&ia->ri_done,
551 msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400552 rc = ia->ri_async_rc;
553 if (rc)
554 goto out;
555
556 return id;
557
558out:
559 rdma_destroy_id(id);
560 return ERR_PTR(rc);
561}
562
563/*
564 * Drain any cq, prior to teardown.
565 */
566static void
567rpcrdma_clean_cq(struct ib_cq *cq)
568{
569 struct ib_wc wc;
570 int count = 0;
571
572 while (1 == ib_poll_cq(cq, 1, &wc))
573 ++count;
574
575 if (count)
576 dprintk("RPC: %s: flushed %d events (last 0x%x)\n",
577 __func__, count, wc.opcode);
578}
579
580/*
581 * Exported functions.
582 */
583
584/*
585 * Open and initialize an Interface Adapter.
586 * o initializes fields of struct rpcrdma_ia, including
587 * interface and provider attributes and protection zone.
588 */
589int
590rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
591{
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400592 int rc, mem_priv;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400593 struct rpcrdma_ia *ia = &xprt->rx_ia;
Chuck Lever7bc79722015-01-21 11:03:27 -0500594 struct ib_device_attr *devattr = &ia->ri_devattr;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400595
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400596 ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
597 if (IS_ERR(ia->ri_id)) {
598 rc = PTR_ERR(ia->ri_id);
599 goto out1;
600 }
601
602 ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
603 if (IS_ERR(ia->ri_pd)) {
604 rc = PTR_ERR(ia->ri_pd);
605 dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
606 __func__, rc);
607 goto out2;
608 }
609
Chuck Lever7bc79722015-01-21 11:03:27 -0500610 rc = ib_query_device(ia->ri_id->device, devattr);
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400611 if (rc) {
612 dprintk("RPC: %s: ib_query_device failed %d\n",
613 __func__, rc);
Chuck Lever5ae711a2015-01-21 11:03:19 -0500614 goto out3;
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400615 }
616
Chuck Lever7bc79722015-01-21 11:03:27 -0500617 if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400618 ia->ri_have_dma_lkey = 1;
619 ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
620 }
621
Chuck Leverf10eafd2014-05-28 10:32:51 -0400622 if (memreg == RPCRDMA_FRMR) {
Tom Talpey3197d3092008-10-09 15:00:20 -0400623 /* Requires both frmr reg and local dma lkey */
Chuck Lever7bc79722015-01-21 11:03:27 -0500624 if ((devattr->device_cap_flags &
Tom Talpey3197d3092008-10-09 15:00:20 -0400625 (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
626 (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
Tom Talpey3197d3092008-10-09 15:00:20 -0400627 dprintk("RPC: %s: FRMR registration "
Chuck Leverf10eafd2014-05-28 10:32:51 -0400628 "not supported by HCA\n", __func__);
629 memreg = RPCRDMA_MTHCAFMR;
Steve Wise0fc6c4e2014-05-28 10:32:00 -0400630 } else {
631 /* Mind the ia limit on FRMR page list depth */
632 ia->ri_max_frmr_depth = min_t(unsigned int,
633 RPCRDMA_MAX_DATA_SEGS,
Chuck Lever7bc79722015-01-21 11:03:27 -0500634 devattr->max_fast_reg_page_list_len);
Tom Talpey3197d3092008-10-09 15:00:20 -0400635 }
Chuck Leverf10eafd2014-05-28 10:32:51 -0400636 }
637 if (memreg == RPCRDMA_MTHCAFMR) {
638 if (!ia->ri_id->device->alloc_fmr) {
639 dprintk("RPC: %s: MTHCAFMR registration "
640 "not supported by HCA\n", __func__);
Chuck Leverf10eafd2014-05-28 10:32:51 -0400641 memreg = RPCRDMA_ALLPHYSICAL;
Chuck Leverf10eafd2014-05-28 10:32:51 -0400642 }
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400643 }
644
645 /*
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400646 * Optionally obtain an underlying physical identity mapping in
647 * order to do a memory window-based bind. This base registration
648 * is protected from remote access - that is enabled only by binding
649 * for the specific bytes targeted during each RPC operation, and
650 * revoked after the corresponding completion similar to a storage
651 * adapter.
652 */
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400653 switch (memreg) {
Tom Talpey3197d3092008-10-09 15:00:20 -0400654 case RPCRDMA_FRMR:
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400655 break;
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400656 case RPCRDMA_ALLPHYSICAL:
657 mem_priv = IB_ACCESS_LOCAL_WRITE |
658 IB_ACCESS_REMOTE_WRITE |
659 IB_ACCESS_REMOTE_READ;
660 goto register_setup;
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400661 case RPCRDMA_MTHCAFMR:
662 if (ia->ri_have_dma_lkey)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400663 break;
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400664 mem_priv = IB_ACCESS_LOCAL_WRITE;
665 register_setup:
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400666 ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
667 if (IS_ERR(ia->ri_bind_mem)) {
668 printk(KERN_ALERT "%s: ib_get_dma_mr for "
Chuck Lever0ac531c2014-05-28 10:32:43 -0400669 "phys register failed with %lX\n",
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400670 __func__, PTR_ERR(ia->ri_bind_mem));
Chuck Lever0ac531c2014-05-28 10:32:43 -0400671 rc = -ENOMEM;
Chuck Lever5ae711a2015-01-21 11:03:19 -0500672 goto out3;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400673 }
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400674 break;
675 default:
Chuck Levercdd9ade2014-05-28 10:33:00 -0400676 printk(KERN_ERR "RPC: Unsupported memory "
677 "registration mode: %d\n", memreg);
678 rc = -ENOMEM;
Chuck Lever5ae711a2015-01-21 11:03:19 -0500679 goto out3;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400680 }
Tom Talpeybd7ed1d2008-10-09 15:00:09 -0400681 dprintk("RPC: %s: memory registration strategy is %d\n",
682 __func__, memreg);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400683
684 /* Else will do memory reg/dereg for each chunk */
685 ia->ri_memreg_strategy = memreg;
686
Chuck Lever73806c82014-07-29 17:23:25 -0400687 rwlock_init(&ia->ri_qplock);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400688 return 0;
Chuck Lever5ae711a2015-01-21 11:03:19 -0500689
690out3:
691 ib_dealloc_pd(ia->ri_pd);
692 ia->ri_pd = NULL;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400693out2:
694 rdma_destroy_id(ia->ri_id);
Tom Talpeyfee08ca2008-10-09 15:01:00 -0400695 ia->ri_id = NULL;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400696out1:
697 return rc;
698}
699
700/*
701 * Clean up/close an IA.
702 * o if event handles and PD have been initialized, free them.
703 * o close the IA
704 */
705void
706rpcrdma_ia_close(struct rpcrdma_ia *ia)
707{
708 int rc;
709
710 dprintk("RPC: %s: entering\n", __func__);
711 if (ia->ri_bind_mem != NULL) {
712 rc = ib_dereg_mr(ia->ri_bind_mem);
713 dprintk("RPC: %s: ib_dereg_mr returned %i\n",
714 __func__, rc);
715 }
Tom Talpeyfee08ca2008-10-09 15:01:00 -0400716 if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
717 if (ia->ri_id->qp)
718 rdma_destroy_qp(ia->ri_id);
719 rdma_destroy_id(ia->ri_id);
720 ia->ri_id = NULL;
721 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400722 if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) {
723 rc = ib_dealloc_pd(ia->ri_pd);
724 dprintk("RPC: %s: ib_dealloc_pd returned %i\n",
725 __func__, rc);
726 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400727}
728
729/*
730 * Create unconnected endpoint.
731 */
732int
733rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
734 struct rpcrdma_create_data_internal *cdata)
735{
Chuck Lever7bc79722015-01-21 11:03:27 -0500736 struct ib_device_attr *devattr = &ia->ri_devattr;
Chuck Leverfc664482014-05-28 10:33:25 -0400737 struct ib_cq *sendcq, *recvcq;
Chuck Lever5d40a8a2007-10-26 13:30:54 -0400738 int rc, err;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400739
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400740 /* check provider's send/recv wr limits */
Chuck Lever7bc79722015-01-21 11:03:27 -0500741 if (cdata->max_requests > devattr->max_qp_wr)
742 cdata->max_requests = devattr->max_qp_wr;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400743
744 ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
745 ep->rep_attr.qp_context = ep;
746 /* send_cq and recv_cq initialized below */
747 ep->rep_attr.srq = NULL;
748 ep->rep_attr.cap.max_send_wr = cdata->max_requests;
749 switch (ia->ri_memreg_strategy) {
Steve Wise0fc6c4e2014-05-28 10:32:00 -0400750 case RPCRDMA_FRMR: {
751 int depth = 7;
752
Tom Tucker15cdc6442010-08-11 12:47:24 -0400753 /* Add room for frmr register and invalidate WRs.
754 * 1. FRMR reg WR for head
755 * 2. FRMR invalidate WR for head
Steve Wise0fc6c4e2014-05-28 10:32:00 -0400756 * 3. N FRMR reg WRs for pagelist
757 * 4. N FRMR invalidate WRs for pagelist
Tom Tucker15cdc6442010-08-11 12:47:24 -0400758 * 5. FRMR reg WR for tail
759 * 6. FRMR invalidate WR for tail
760 * 7. The RDMA_SEND WR
761 */
Steve Wise0fc6c4e2014-05-28 10:32:00 -0400762
763 /* Calculate N if the device max FRMR depth is smaller than
764 * RPCRDMA_MAX_DATA_SEGS.
765 */
766 if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
767 int delta = RPCRDMA_MAX_DATA_SEGS -
768 ia->ri_max_frmr_depth;
769
770 do {
771 depth += 2; /* FRMR reg + invalidate */
772 delta -= ia->ri_max_frmr_depth;
773 } while (delta > 0);
774
775 }
776 ep->rep_attr.cap.max_send_wr *= depth;
Chuck Lever7bc79722015-01-21 11:03:27 -0500777 if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
778 cdata->max_requests = devattr->max_qp_wr / depth;
Tom Tucker15cdc6442010-08-11 12:47:24 -0400779 if (!cdata->max_requests)
780 return -EINVAL;
Steve Wise0fc6c4e2014-05-28 10:32:00 -0400781 ep->rep_attr.cap.max_send_wr = cdata->max_requests *
782 depth;
Tom Tucker15cdc6442010-08-11 12:47:24 -0400783 }
Tom Talpey3197d3092008-10-09 15:00:20 -0400784 break;
Steve Wise0fc6c4e2014-05-28 10:32:00 -0400785 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400786 default:
787 break;
788 }
789 ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
790 ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
791 ep->rep_attr.cap.max_recv_sge = 1;
792 ep->rep_attr.cap.max_inline_data = 0;
793 ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
794 ep->rep_attr.qp_type = IB_QPT_RC;
795 ep->rep_attr.port_num = ~0;
796
797 dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
798 "iovs: send %d recv %d\n",
799 __func__,
800 ep->rep_attr.cap.max_send_wr,
801 ep->rep_attr.cap.max_recv_wr,
802 ep->rep_attr.cap.max_send_sge,
803 ep->rep_attr.cap.max_recv_sge);
804
805 /* set trigger for requesting send completion */
Chuck Leverfc664482014-05-28 10:33:25 -0400806 ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
Chuck Levere7104a22014-11-08 20:14:20 -0500807 if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS)
808 ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS;
809 else if (ep->rep_cqinit <= 2)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400810 ep->rep_cqinit = 0;
811 INIT_CQCOUNT(ep);
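	/* rep_cqinit bounds how many SENDs may be posted without a
	 * signaled completion (counted down via DECR_CQCOUNT); this
	 * keeps unreaped entries from overflowing the send CQ. */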
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400812 init_waitqueue_head(&ep->rep_connect_wait);
Chuck Lever254f91e2014-05-28 10:32:17 -0400813 INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400814
Chuck Leverfc664482014-05-28 10:33:25 -0400815 sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
Chuck Lever1c00dd02014-05-28 10:33:42 -0400816 rpcrdma_cq_async_error_upcall, ep,
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400817 ep->rep_attr.cap.max_send_wr + 1, 0);
Chuck Leverfc664482014-05-28 10:33:25 -0400818 if (IS_ERR(sendcq)) {
819 rc = PTR_ERR(sendcq);
820 dprintk("RPC: %s: failed to create send CQ: %i\n",
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400821 __func__, rc);
822 goto out1;
823 }
824
Chuck Leverfc664482014-05-28 10:33:25 -0400825 rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400826 if (rc) {
827 dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
828 __func__, rc);
829 goto out2;
830 }
831
Chuck Leverfc664482014-05-28 10:33:25 -0400832 recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
Chuck Lever1c00dd02014-05-28 10:33:42 -0400833 rpcrdma_cq_async_error_upcall, ep,
Chuck Leverfc664482014-05-28 10:33:25 -0400834 ep->rep_attr.cap.max_recv_wr + 1, 0);
835 if (IS_ERR(recvcq)) {
836 rc = PTR_ERR(recvcq);
837 dprintk("RPC: %s: failed to create recv CQ: %i\n",
838 __func__, rc);
839 goto out2;
840 }
841
842 rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
843 if (rc) {
844 dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
845 __func__, rc);
846 ib_destroy_cq(recvcq);
847 goto out2;
848 }
849
850 ep->rep_attr.send_cq = sendcq;
851 ep->rep_attr.recv_cq = recvcq;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400852
853 /* Initialize cma parameters */
854
855 /* RPC/RDMA does not use private data */
856 ep->rep_remote_cma.private_data = NULL;
857 ep->rep_remote_cma.private_data_len = 0;
858
859 /* Client offers RDMA Read but does not initiate */
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400860 ep->rep_remote_cma.initiator_depth = 0;
Chuck Lever7bc79722015-01-21 11:03:27 -0500861 if (devattr->max_qp_rd_atom > 32) /* arbitrary but <= 255 */
Tom Tuckerb334eaa2008-10-09 15:00:30 -0400862 ep->rep_remote_cma.responder_resources = 32;
863 else
Chuck Lever7bc79722015-01-21 11:03:27 -0500864 ep->rep_remote_cma.responder_resources =
865 devattr->max_qp_rd_atom;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400866
867 ep->rep_remote_cma.retry_count = 7;
868 ep->rep_remote_cma.flow_control = 0;
869 ep->rep_remote_cma.rnr_retry_count = 0;
870
871 return 0;
872
873out2:
Chuck Leverfc664482014-05-28 10:33:25 -0400874 err = ib_destroy_cq(sendcq);
Chuck Lever5d40a8a2007-10-26 13:30:54 -0400875 if (err)
876 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
877 __func__, err);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400878out1:
879 return rc;
880}
881
882/*
883 * rpcrdma_ep_destroy
884 *
885 * Disconnect and destroy endpoint. After this, the only
886 * valid operations on the ep are to free it (if dynamically
887 * allocated) or re-create it.
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400888 */
Chuck Lever7f1d5412014-05-28 10:33:16 -0400889void
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400890rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
891{
892 int rc;
893
894 dprintk("RPC: %s: entering, connected is %d\n",
895 __func__, ep->rep_connected);
896
Chuck Lever254f91e2014-05-28 10:32:17 -0400897 cancel_delayed_work_sync(&ep->rep_connect_worker);
898
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400899 if (ia->ri_id->qp) {
Chuck Lever282191c2014-07-29 17:25:55 -0400900 rpcrdma_ep_disconnect(ep, ia);
Tom Talpeyfee08ca2008-10-09 15:01:00 -0400901 rdma_destroy_qp(ia->ri_id);
902 ia->ri_id->qp = NULL;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400903 }
904
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400905 /* padding - could be done in rpcrdma_buffer_destroy... */
906 if (ep->rep_pad_mr) {
907 rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad);
908 ep->rep_pad_mr = NULL;
909 }
910
Chuck Leverfc664482014-05-28 10:33:25 -0400911 rpcrdma_clean_cq(ep->rep_attr.recv_cq);
912 rc = ib_destroy_cq(ep->rep_attr.recv_cq);
913 if (rc)
914 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
915 __func__, rc);
916
917 rpcrdma_clean_cq(ep->rep_attr.send_cq);
918 rc = ib_destroy_cq(ep->rep_attr.send_cq);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400919 if (rc)
920 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
921 __func__, rc);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400922}
923
924/*
925 * Connect unconnected endpoint.
926 */
927int
928rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
929{
Chuck Lever73806c82014-07-29 17:23:25 -0400930 struct rdma_cm_id *id, *old;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400931 int rc = 0;
932 int retry_count = 0;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400933
Tom Talpeyc0555512008-10-10 11:32:45 -0400934 if (ep->rep_connected != 0) {
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400935 struct rpcrdma_xprt *xprt;
936retry:
Chuck Leverec62f402014-05-28 10:34:07 -0400937 dprintk("RPC: %s: reconnecting...\n", __func__);
Chuck Lever282191c2014-07-29 17:25:55 -0400938
939 rpcrdma_ep_disconnect(ep, ia);
Chuck Levera7bc2112014-07-29 17:23:52 -0400940 rpcrdma_flush_cqs(ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400941
Chuck Lever467c9672014-11-08 20:14:29 -0500942 switch (ia->ri_memreg_strategy) {
943 case RPCRDMA_FRMR:
Chuck Lever9f9d8022014-07-29 17:24:45 -0400944 rpcrdma_reset_frmrs(ia);
Chuck Lever467c9672014-11-08 20:14:29 -0500945 break;
946 case RPCRDMA_MTHCAFMR:
947 rpcrdma_reset_fmrs(ia);
948 break;
949 case RPCRDMA_ALLPHYSICAL:
950 break;
951 default:
952 rc = -EIO;
953 goto out;
954 }
Chuck Lever9f9d8022014-07-29 17:24:45 -0400955
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400956 xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
957 id = rpcrdma_create_id(xprt, ia,
958 (struct sockaddr *)&xprt->rx_data.addr);
959 if (IS_ERR(id)) {
Chuck Leverec62f402014-05-28 10:34:07 -0400960 rc = -EHOSTUNREACH;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400961 goto out;
962 }
963 /* TEMP TEMP TEMP - fail if new device:
964 * Deregister/remarshal *all* requests!
965 * Close and recreate adapter, pd, etc!
966 * Re-determine all attributes still sane!
967 * More stuff I haven't thought of!
968 * Rrrgh!
969 */
970 if (ia->ri_id->device != id->device) {
971 printk("RPC: %s: can't reconnect on "
972 "different device!\n", __func__);
973 rdma_destroy_id(id);
Chuck Leverec62f402014-05-28 10:34:07 -0400974 rc = -ENETUNREACH;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400975 goto out;
976 }
977 /* END TEMP */
Chuck Leverec62f402014-05-28 10:34:07 -0400978 rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
979 if (rc) {
980 dprintk("RPC: %s: rdma_create_qp failed %i\n",
981 __func__, rc);
982 rdma_destroy_id(id);
983 rc = -ENETUNREACH;
984 goto out;
985 }
Chuck Lever73806c82014-07-29 17:23:25 -0400986
987 write_lock(&ia->ri_qplock);
988 old = ia->ri_id;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400989 ia->ri_id = id;
Chuck Lever73806c82014-07-29 17:23:25 -0400990 write_unlock(&ia->ri_qplock);
991
992 rdma_destroy_qp(old);
993 rdma_destroy_id(old);
Chuck Leverec62f402014-05-28 10:34:07 -0400994 } else {
995 dprintk("RPC: %s: connecting...\n", __func__);
996 rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
997 if (rc) {
998 dprintk("RPC: %s: rdma_create_qp failed %i\n",
999 __func__, rc);
1000 /* do not update ep->rep_connected */
1001 return -ENETUNREACH;
1002 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001003 }
1004
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001005 ep->rep_connected = 0;
1006
1007 rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
1008 if (rc) {
1009 dprintk("RPC: %s: rdma_connect() failed with %i\n",
1010 __func__, rc);
1011 goto out;
1012 }
1013
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001014 wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
1015
1016 /*
1017 * Check state. A non-peer reject indicates no listener
1018 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition that has already
	 * undergone best-effort recovery.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		dprintk("RPC: %s: connected\n", __func__);
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_flush_cqs(ep);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
		dprintk("RPC: %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
}

static struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->rl_buffer = &r_xprt->rx_buf;
	return req;
}

static struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
					       GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_buffer = &r_xprt->rx_buf;
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

static int
rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
{
	int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
	struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_DATA_SEGS,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};
	struct rpcrdma_mw *r;
	int i, rc;

	i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
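	/* Sized so each of the rb_max_requests credits can use one MW
	 * per segment, with one extra request's worth as spares. */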
1134 dprintk("RPC: %s: initalizing %d FMRs\n", __func__, i);

	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (r == NULL)
			return -ENOMEM;

		r->r.fmr = ib_alloc_fmr(ia->ri_pd, mr_access_flags, &fmr_attr);
		if (IS_ERR(r->r.fmr)) {
			rc = PTR_ERR(r->r.fmr);
			dprintk("RPC: %s: ib_alloc_fmr failed %i\n",
				__func__, rc);
			goto out_free;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}
	return 0;

out_free:
	kfree(r);
	return rc;
}

static int
rpcrdma_init_frmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
{
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *r;
	int i, rc;

	i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
	dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);
1168
1169 while (i--) {
1170 r = kzalloc(sizeof(*r), GFP_KERNEL);
1171 if (r == NULL)
1172 return -ENOMEM;
1173 f = &r->r.frmr;
1174
1175 f->fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
1176 ia->ri_max_frmr_depth);
1177 if (IS_ERR(f->fr_mr)) {
1178 rc = PTR_ERR(f->fr_mr);
1179 dprintk("RPC: %s: ib_alloc_fast_reg_mr "
1180 "failed %i\n", __func__, rc);
1181 goto out_free;
1182 }
1183
1184 f->fr_pgl = ib_alloc_fast_reg_page_list(ia->ri_id->device,
1185 ia->ri_max_frmr_depth);
1186 if (IS_ERR(f->fr_pgl)) {
1187 rc = PTR_ERR(f->fr_pgl);
1188 dprintk("RPC: %s: ib_alloc_fast_reg_page_list "
1189 "failed %i\n", __func__, rc);
1190
1191 ib_dereg_mr(f->fr_mr);
1192 goto out_free;
1193 }
1194
1195 list_add(&r->mw_list, &buf->rb_mws);
1196 list_add(&r->mw_all, &buf->rb_all);
1197 }
1198
1199 return 0;
1200
1201out_free:
1202 kfree(r);
1203 return rc;
1204}
1205
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001206int
Chuck Leverac920d02015-01-21 11:03:44 -05001207rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001208{
Chuck Leverac920d02015-01-21 11:03:44 -05001209 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1210 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
1211 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001212 char *p;
Chuck Lever13924022015-01-21 11:03:52 -05001213 size_t len;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001214 int i, rc;
1215
1216 buf->rb_max_requests = cdata->max_requests;
1217 spin_lock_init(&buf->rb_lock);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001218
1219 /* Need to allocate:
1220 * 1. arrays for send and recv pointers
1221 * 2. arrays of struct rpcrdma_req to fill in pointers
1222 * 3. array of struct rpcrdma_rep for replies
1223 * 4. padding, if any
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001224 * Send/recv buffers in req/rep need to be registered
1225 */
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001226 len = buf->rb_max_requests *
1227 (sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
1228 len += cdata->padding;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001229
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001230 p = kzalloc(len, GFP_KERNEL);
1231 if (p == NULL) {
1232 dprintk("RPC: %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
1233 __func__, len);
1234 rc = -ENOMEM;
1235 goto out;
1236 }
1237 buf->rb_pool = p; /* for freeing it later */
1238
1239 buf->rb_send_bufs = (struct rpcrdma_req **) p;
1240 p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
1241 buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
1242 p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];
1243
1244 /*
1245 * Register the zeroed pad buffer, if any.
1246 */
1247 if (cdata->padding) {
Chuck Leverac920d02015-01-21 11:03:44 -05001248 struct rpcrdma_ep *ep = &r_xprt->rx_ep;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001249 rc = rpcrdma_register_internal(ia, p, cdata->padding,
1250 &ep->rep_pad_mr, &ep->rep_pad);
1251 if (rc)
1252 goto out;
1253 }
1254 p += cdata->padding;
1255
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001256 INIT_LIST_HEAD(&buf->rb_mws);
Chuck Lever3111d722014-07-29 17:24:28 -04001257 INIT_LIST_HEAD(&buf->rb_all);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001258 switch (ia->ri_memreg_strategy) {
Tom Talpey3197d3092008-10-09 15:00:20 -04001259 case RPCRDMA_FRMR:
Chuck Lever2e845222014-07-29 17:25:38 -04001260 rc = rpcrdma_init_frmrs(ia, buf);
1261 if (rc)
1262 goto out;
Tom Talpey3197d3092008-10-09 15:00:20 -04001263 break;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001264 case RPCRDMA_MTHCAFMR:
Chuck Lever2e845222014-07-29 17:25:38 -04001265 rc = rpcrdma_init_fmrs(ia, buf);
1266 if (rc)
1267 goto out;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001268 break;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001269 default:
1270 break;
1271 }
1272
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001273 for (i = 0; i < buf->rb_max_requests; i++) {
1274 struct rpcrdma_req *req;
1275 struct rpcrdma_rep *rep;
1276
Chuck Lever13924022015-01-21 11:03:52 -05001277 req = rpcrdma_create_req(r_xprt);
1278 if (IS_ERR(req)) {
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001279 dprintk("RPC: %s: request buffer %d alloc"
1280 " failed\n", __func__, i);
Chuck Lever13924022015-01-21 11:03:52 -05001281 rc = PTR_ERR(req);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001282 goto out;
1283 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001284 buf->rb_send_bufs[i] = req;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001285
Chuck Lever13924022015-01-21 11:03:52 -05001286 rep = rpcrdma_create_rep(r_xprt);
1287 if (IS_ERR(rep)) {
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001288 dprintk("RPC: %s: reply buffer %d alloc failed\n",
1289 __func__, i);
Chuck Lever13924022015-01-21 11:03:52 -05001290 rc = PTR_ERR(rep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001291 goto out;
1292 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001293 buf->rb_recv_bufs[i] = rep;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001294 }
Chuck Lever13924022015-01-21 11:03:52 -05001295
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001296 return 0;
1297out:
1298 rpcrdma_buffer_destroy(buf);
1299 return rc;
1300}
1301
Chuck Lever2e845222014-07-29 17:25:38 -04001302static void
Chuck Lever13924022015-01-21 11:03:52 -05001303rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
1304{
1305 if (!rep)
1306 return;
1307
Chuck Lever6b1184c2015-01-21 11:04:25 -05001308 rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
Chuck Lever13924022015-01-21 11:03:52 -05001309 kfree(rep);
1310}
1311
1312static void
1313rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
1314{
1315 if (!req)
1316 return;
1317
Chuck Lever0ca77dc2015-01-21 11:04:08 -05001318 rpcrdma_free_regbuf(ia, req->rl_sendbuf);
Chuck Lever85275c82015-01-21 11:04:16 -05001319 rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
Chuck Lever13924022015-01-21 11:03:52 -05001320 kfree(req);
1321}
1322
1323static void
Chuck Lever2e845222014-07-29 17:25:38 -04001324rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf)
1325{
1326 struct rpcrdma_mw *r;
1327 int rc;
1328
1329 while (!list_empty(&buf->rb_all)) {
1330 r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
1331 list_del(&r->mw_all);
1332 list_del(&r->mw_list);
1333
1334 rc = ib_dealloc_fmr(r->r.fmr);
1335 if (rc)
1336 dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
1337 __func__, rc);
1338
1339 kfree(r);
1340 }
1341}
1342
1343static void
1344rpcrdma_destroy_frmrs(struct rpcrdma_buffer *buf)
1345{
1346 struct rpcrdma_mw *r;
1347 int rc;
1348
1349 while (!list_empty(&buf->rb_all)) {
1350 r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
1351 list_del(&r->mw_all);
1352 list_del(&r->mw_list);
1353
		rc = ib_dereg_mr(r->r.frmr.fr_mr);
		if (rc)
			dprintk("RPC:       %s: ib_dereg_mr failed %i\n",
				__func__, rc);
		ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);

		kfree(r);
	}
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	int i;

	/* clean up in reverse order from create
	 *   1.  recv mr memory (mr free, then kfree)
	 *   2.  send mr memory (mr free, then kfree)
	 *   3.  MWs
	 */
	dprintk("RPC:       %s: entering\n", __func__);

	for (i = 0; i < buf->rb_max_requests; i++) {
		if (buf->rb_recv_bufs)
			rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]);
		if (buf->rb_send_bufs)
			rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]);
	}

	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		rpcrdma_destroy_frmrs(buf);
		break;
	case RPCRDMA_MTHCAFMR:
		rpcrdma_destroy_fmrs(buf);
		break;
	default:
		break;
	}

	kfree(buf->rb_pool);
}

/* After a disconnect, unmap all FMRs.
 *
 * This is invoked only in the transport connect worker in order
 * to serialize with rpcrdma_register_fmr_external().
 */
static void
rpcrdma_reset_fmrs(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt =
				container_of(ia, struct rpcrdma_xprt, rx_ia);
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct list_head *pos;
	struct rpcrdma_mw *r;
	LIST_HEAD(l);
	int rc;

	list_for_each(pos, &buf->rb_all) {
		r = list_entry(pos, struct rpcrdma_mw, mw_all);

		INIT_LIST_HEAD(&l);
		list_add(&r->r.fmr->list, &l);
		rc = ib_unmap_fmr(&l);
		if (rc)
			dprintk("RPC:       %s: ib_unmap_fmr failed %i\n",
				__func__, rc);
	}
}

/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
 * an unusable state. Find FRMRs in this state and dereg / reg
 * each. FRMRs that are VALID and attached to an rpcrdma_req are
 * also torn down.
 *
 * This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
 *
 * This is invoked only in the transport connect worker in order
 * to serialize with rpcrdma_register_frmr_external().
 */
static void
rpcrdma_reset_frmrs(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt =
				container_of(ia, struct rpcrdma_xprt, rx_ia);
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct list_head *pos;
	struct rpcrdma_mw *r;
	int rc;

	list_for_each(pos, &buf->rb_all) {
		r = list_entry(pos, struct rpcrdma_mw, mw_all);

		if (r->r.frmr.fr_state == FRMR_IS_INVALID)
			continue;

		rc = ib_dereg_mr(r->r.frmr.fr_mr);
		if (rc)
			dprintk("RPC:       %s: ib_dereg_mr failed %i\n",
				__func__, rc);
		ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);

		r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
					ia->ri_max_frmr_depth);
		if (IS_ERR(r->r.frmr.fr_mr)) {
			rc = PTR_ERR(r->r.frmr.fr_mr);
			dprintk("RPC:       %s: ib_alloc_fast_reg_mr"
				" failed %i\n", __func__, rc);
			continue;
		}
		r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list(
					ia->ri_id->device,
					ia->ri_max_frmr_depth);
		if (IS_ERR(r->r.frmr.fr_pgl)) {
			rc = PTR_ERR(r->r.frmr.fr_pgl);
			dprintk("RPC:       %s: "
				"ib_alloc_fast_reg_page_list "
				"failed %i\n", __func__, rc);

			ib_dereg_mr(r->r.frmr.fr_mr);
			continue;
		}
		r->r.frmr.fr_state = FRMR_IS_INVALID;
	}
}

/* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
 * some req segments uninitialized.
 */
static void
rpcrdma_buffer_put_mr(struct rpcrdma_mw **mw, struct rpcrdma_buffer *buf)
{
	if (*mw) {
		list_add_tail(&(*mw)->mw_list, &buf->rb_mws);
		*mw = NULL;
	}
}

/* Cycle MWs back in reverse order, and "spin" them.
 * This delays and scrambles reuse as much as possible.
 */
static void
rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_mr_seg *seg1 = seg;
	int i;

	for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++)
		rpcrdma_buffer_put_mr(&seg->rl_mw, buf);
	rpcrdma_buffer_put_mr(&seg1->rl_mw, buf);
}

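/* Return a request's send buffer, and any reply buffer still
 * attached to it, to the pool so both can be reused.
 */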
static void
rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	buf->rb_send_bufs[--buf->rb_send_index] = req;
	req->rl_niovs = 0;
	if (req->rl_reply) {
		buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
		req->rl_reply->rr_func = NULL;
		req->rl_reply = NULL;
	}
}

/* rpcrdma_unmap_one() was already done by rpcrdma_deregister_frmr_external().
 * Redo only the ib_post_send().
 */
static void
rpcrdma_retry_local_inv(struct rpcrdma_mw *r, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt =
				container_of(ia, struct rpcrdma_xprt, rx_ia);
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc;

	dprintk("RPC:       %s: FRMR %p is stale\n", __func__, r);

	/* When this FRMR is re-inserted into rb_mws, it is no longer stale */
	r->r.frmr.fr_state = FRMR_IS_INVALID;

	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
	invalidate_wr.wr_id = (unsigned long)(void *)r;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.ex.invalidate_rkey = r->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	dprintk("RPC:       %s: frmr %p invalidating rkey %08x\n",
		__func__, r, r->r.frmr.fr_mr->rkey);

	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc) {
		/* Force rpcrdma_buffer_get() to retry */
		r->r.frmr.fr_state = FRMR_IS_STALE;
		dprintk("RPC:       %s: ib_post_send failed, %i\n",
			__func__, rc);
	}
}

static void
rpcrdma_retry_flushed_linv(struct list_head *stale,
			   struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct list_head *pos;
	struct rpcrdma_mw *r;
	unsigned long flags;

	list_for_each(pos, stale) {
		r = list_entry(pos, struct rpcrdma_mw, mw_list);
		rpcrdma_retry_local_inv(r, ia);
	}

	spin_lock_irqsave(&buf->rb_lock, flags);
	list_splice_tail(stale, &buf->rb_mws);
	spin_unlock_irqrestore(&buf->rb_lock, flags);
}

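/* Fill a request's MR segments from the rb_mws free list. FRMRs
 * found in the STALE state are set aside for a LOCAL_INV retry;
 * if the list runs short, everything goes back to the pool.
 */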
static struct rpcrdma_req *
rpcrdma_buffer_get_frmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf,
			 struct list_head *stale)
{
	struct rpcrdma_mw *r;
	int i;

	i = RPCRDMA_MAX_SEGS - 1;
	while (!list_empty(&buf->rb_mws)) {
		r = list_entry(buf->rb_mws.next,
			       struct rpcrdma_mw, mw_list);
		list_del(&r->mw_list);
		if (r->r.frmr.fr_state == FRMR_IS_STALE) {
			list_add(&r->mw_list, stale);
			continue;
		}
		req->rl_segments[i].rl_mw = r;
		if (unlikely(i-- == 0))
			return req;	/* Success */
	}

	/* Not enough entries on rb_mws for this req */
	rpcrdma_buffer_put_sendbuf(req, buf);
	rpcrdma_buffer_put_mrs(req, buf);
	return NULL;
}

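/* As above, but for FMRs: no stale-state handling is needed, so
 * every MW pulled off rb_mws can be used as-is.
 */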
static struct rpcrdma_req *
rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;
	int i;

	i = RPCRDMA_MAX_SEGS - 1;
	while (!list_empty(&buf->rb_mws)) {
		r = list_entry(buf->rb_mws.next,
			       struct rpcrdma_mw, mw_list);
		list_del(&r->mw_list);
		req->rl_segments[i].rl_mw = r;
		if (unlikely(i-- == 0))
			return req;	/* Success */
	}

	/* Not enough entries on rb_mws for this req */
	rpcrdma_buffer_put_sendbuf(req, buf);
	rpcrdma_buffer_put_mrs(req, buf);
	return NULL;
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if needed) is attached to send buffer upon return.
 * Rule:
 *    rb_send_index and rb_recv_index MUST always be pointing to the
 *    *next* available buffer (non-NULL). They are incremented after
 *    removing buffers, and decremented *before* returning them.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
	struct list_head stale;
	struct rpcrdma_req *req;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_send_index == buffers->rb_max_requests) {
		spin_unlock_irqrestore(&buffers->rb_lock, flags);
		dprintk("RPC:       %s: out of request buffers\n", __func__);
		return NULL;
	}

	req = buffers->rb_send_bufs[buffers->rb_send_index];
	if (buffers->rb_send_index < buffers->rb_recv_index) {
		dprintk("RPC:       %s: %d extra receives outstanding (ok)\n",
			__func__,
			buffers->rb_recv_index - buffers->rb_send_index);
		req->rl_reply = NULL;
	} else {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;

	INIT_LIST_HEAD(&stale);
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		req = rpcrdma_buffer_get_frmrs(req, buffers, &stale);
		break;
	case RPCRDMA_MTHCAFMR:
		req = rpcrdma_buffer_get_fmrs(req, buffers);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
	if (!list_empty(&stale))
		rpcrdma_retry_flushed_linv(&stale, buffers);
	return req;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	rpcrdma_buffer_put_sendbuf(req, buffers);
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
	case RPCRDMA_MTHCAFMR:
		rpcrdma_buffer_put_mrs(req, buffers);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from error conditions.
 * Post-increment counter/array index.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_recv_index < buffers->rb_max_requests) {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = rep->rr_buffer;
	unsigned long flags;

	rep->rr_func = NULL;
	spin_lock_irqsave(&buffers->rb_lock, flags);
	buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

int
rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
			  struct ib_mr **mrp, struct ib_sge *iov)
{
	struct ib_phys_buf ipb;
	struct ib_mr *mr;
	int rc;

	/*
	 * All memory passed here was kmalloc'ed, therefore phys-contiguous.
	 */
	iov->addr = ib_dma_map_single(ia->ri_id->device,
			va, len, DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(ia->ri_id->device, iov->addr))
		return -ENOMEM;

	iov->length = len;

	if (ia->ri_have_dma_lkey) {
		*mrp = NULL;
		iov->lkey = ia->ri_dma_lkey;
		return 0;
	} else if (ia->ri_bind_mem != NULL) {
		*mrp = NULL;
		iov->lkey = ia->ri_bind_mem->lkey;
		return 0;
	}

	ipb.addr = iov->addr;
	ipb.size = iov->length;
	mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
			IB_ACCESS_LOCAL_WRITE, &iov->addr);

	dprintk("RPC:       %s: phys convert: 0x%llx "
			"registered 0x%llx length %d\n",
			__func__, (unsigned long long)ipb.addr,
			(unsigned long long)iov->addr, len);

	if (IS_ERR(mr)) {
		*mrp = NULL;
		rc = PTR_ERR(mr);
		dprintk("RPC:       %s: failed with %i\n", __func__, rc);
	} else {
		*mrp = mr;
		iov->lkey = mr->lkey;
		rc = 0;
	}

	return rc;
}

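/* Undo rpcrdma_register_internal(): unmap the DMA mapping and,
 * if a dedicated MR was allocated, deregister it.
 */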
int
rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
			    struct ib_mr *mr, struct ib_sge *iov)
{
	int rc;

	ib_dma_unmap_single(ia->ri_id->device,
			iov->addr, iov->length, DMA_BIDIRECTIONAL);

	if (mr == NULL)
		return 0;

	rc = ib_dereg_mr(mr);
	if (rc)
		dprintk("RPC:       %s: ib_dereg_mr failed %i\n", __func__, rc);
	return rc;
}

/**
 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
 * @ia: controlling rpcrdma_ia
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns pointer to private header of an area of internally
 * registered memory, or an ERR_PTR. The registered buffer follows
 * the end of the private header.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. regbufs are not
 * used for RDMA READ/WRITE operations, thus are registered only for
 * LOCAL access.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;
	int rc;

	rc = -ENOMEM;
	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		goto out;

	rb->rg_size = size;
	rb->rg_owner = NULL;
	rc = rpcrdma_register_internal(ia, rb->rg_base, size,
				       &rb->rg_mr, &rb->rg_iov);
	if (rc)
		goto out_free;

	return rb;

out_free:
	kfree(rb);
out:
	return ERR_PTR(rc);
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	if (rb) {
		rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov);
		kfree(rb);
	}
}

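/* Illustration only, not part of the transport: a caller would
 * typically pair the two regbuf helpers like this (the 1024-byte
 * size is an arbitrary example):
 *
 *	struct rpcrdma_regbuf *rb;
 *
 *	rb = rpcrdma_alloc_regbuf(ia, 1024, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *	...
 *	rpcrdma_free_regbuf(ia, rb);
 */
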
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001865/*
1866 * Wrappers for chunk registration, shared by read/write chunk code.
1867 */
1868
1869static void
1870rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing)
1871{
1872 seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1873 seg->mr_dmalen = seg->mr_len;
1874 if (seg->mr_page)
1875 seg->mr_dma = ib_dma_map_page(ia->ri_id->device,
1876 seg->mr_page, offset_in_page(seg->mr_offset),
1877 seg->mr_dmalen, seg->mr_dir);
1878 else
1879 seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
1880 seg->mr_offset,
1881 seg->mr_dmalen, seg->mr_dir);
Tom Tucker5c635e02011-02-09 19:45:34 +00001882 if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
1883 dprintk("RPC: %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
1884 __func__,
Randy Dunlap986d4ab2011-03-15 17:11:59 -07001885 (unsigned long long)seg->mr_dma,
1886 seg->mr_offset, seg->mr_dmalen);
Tom Tucker5c635e02011-02-09 19:45:34 +00001887 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001888}
1889
1890static void
1891rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
1892{
1893 if (seg->mr_page)
1894 ib_dma_unmap_page(ia->ri_id->device,
1895 seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
1896 else
1897 ib_dma_unmap_single(ia->ri_id->device,
1898 seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
1899}
1900
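/* Post a FAST_REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */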
static int
rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
			int *nsegs, int writing, struct rpcrdma_ia *ia,
			struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct rpcrdma_frmr *frmr = &mw->r.frmr;
	struct ib_mr *mr = frmr->fr_mr;
	struct ib_send_wr fastreg_wr, *bad_wr;
	u8 key;
	int len, pageoff;
	int i, rc;
	int seg_len;
	u64 pa;
	int page_no;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (*nsegs > ia->ri_max_frmr_depth)
		*nsegs = ia->ri_max_frmr_depth;
	for (page_no = i = 0; i < *nsegs;) {
		rpcrdma_map_one(ia, seg, writing);
		pa = seg->mr_dma;
		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
			frmr->fr_pgl->page_list[page_no++] = pa;
			pa += PAGE_SIZE;
		}
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	dprintk("RPC:       %s: Using frmr %p to map %d segments\n",
		__func__, mw, i);

	frmr->fr_state = FRMR_IS_VALID;

	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = (unsigned long)(void *)mw;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma;
	fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
	fastreg_wr.wr.fast_reg.page_list_len = page_no;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.length = page_no << PAGE_SHIFT;
	if (fastreg_wr.wr.fast_reg.length < len) {
		rc = -EIO;
		goto out_err;
	}

	/* Bump the key */
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	fastreg_wr.wr.fast_reg.access_flags = (writing ?
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
				IB_ACCESS_REMOTE_READ);
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
	if (rc) {
		dprintk("RPC:       %s: failed ib_post_send for register,"
			" status %i\n", __func__, rc);
		ib_update_fast_reg_key(mr, --key);
		goto out_err;
	} else {
		seg1->mr_rkey = mr->rkey;
		seg1->mr_base = seg1->mr_dma + pageoff;
		seg1->mr_nsegs = i;
		seg1->mr_len = len;
	}
	*nsegs = i;
	return 0;
out_err:
	frmr->fr_state = FRMR_IS_INVALID;
	while (i--)
		rpcrdma_unmap_one(ia, --seg);
	return rc;
}

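/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */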
static int
rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
			struct rpcrdma_ia *ia, struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc;

	seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;

	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
	invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	read_lock(&ia->ri_qplock);
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia, seg++);
	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc) {
		/* Force rpcrdma_buffer_get() to retry */
		seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
		dprintk("RPC:       %s: failed ib_post_send for invalidate,"
			" status %i\n", __func__, rc);
	}
	return rc;
}

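/* Use an FMR to map a list of physical addresses for remote
 * access with a single ib_map_phys_fmr() call.
 */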
static int
rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg,
			int *nsegs, int writing, struct rpcrdma_ia *ia)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
	int len, pageoff, i, rc;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
		*nsegs = RPCRDMA_MAX_DATA_SEGS;
	for (i = 0; i < *nsegs;) {
		rpcrdma_map_one(ia, seg, writing);
		physaddrs[i] = seg->mr_dma;
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	rc = ib_map_phys_fmr(seg1->rl_mw->r.fmr, physaddrs, i, seg1->mr_dma);
	if (rc) {
		dprintk("RPC:       %s: failed ib_map_phys_fmr "
			"%u@0x%llx+%i (%d)... status %i\n", __func__,
			len, (unsigned long long)seg1->mr_dma,
			pageoff, i, rc);
		while (i--)
			rpcrdma_unmap_one(ia, --seg);
	} else {
		seg1->mr_rkey = seg1->rl_mw->r.fmr->rkey;
		seg1->mr_base = seg1->mr_dma + pageoff;
		seg1->mr_nsegs = i;
		seg1->mr_len = len;
	}
	*nsegs = i;
	return rc;
}

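/* Release an FMR mapping: ib_unmap_fmr() first, then DMA-unmap
 * each of the segments it covered.
 */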
static int
rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
			struct rpcrdma_ia *ia)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	LIST_HEAD(l);
	int rc;

	list_add(&seg1->rl_mw->r.fmr->list, &l);
	rc = ib_unmap_fmr(&l);
	read_lock(&ia->ri_qplock);
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia, seg++);
	read_unlock(&ia->ri_qplock);
	if (rc)
		dprintk("RPC:       %s: failed ib_unmap_fmr,"
			" status %i\n", __func__, rc);
	return rc;
}

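/* Register a chunk of memory for remote access, dispatching on
 * the memory registration strategy selected for this transport.
 * Returns the number of segments mapped, or a negative errno.
 */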
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002081int
2082rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
2083 int nsegs, int writing, struct rpcrdma_xprt *r_xprt)
2084{
2085 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002086 int rc = 0;
2087
2088 switch (ia->ri_memreg_strategy) {
2089
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002090 case RPCRDMA_ALLPHYSICAL:
2091 rpcrdma_map_one(ia, seg, writing);
2092 seg->mr_rkey = ia->ri_bind_mem->rkey;
2093 seg->mr_base = seg->mr_dma;
2094 seg->mr_nsegs = 1;
2095 nsegs = 1;
2096 break;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002097
Tom Talpey3197d3092008-10-09 15:00:20 -04002098 /* Registration using frmr registration */
2099 case RPCRDMA_FRMR:
2100 rc = rpcrdma_register_frmr_external(seg, &nsegs, writing, ia, r_xprt);
2101 break;
2102
Tom Talpey8d4ba032008-10-09 14:59:49 -04002103 /* Registration using fmr memory registration */
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002104 case RPCRDMA_MTHCAFMR:
Tom Talpey8d4ba032008-10-09 14:59:49 -04002105 rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002106 break;
2107
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002108 default:
Chuck Lever92b98362014-11-08 20:14:12 -05002109 return -EIO;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002110 }
2111 if (rc)
Chuck Lever92b98362014-11-08 20:14:12 -05002112 return rc;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002113
2114 return nsegs;
2115}
2116
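/* Undo rpcrdma_register_external(): invalidate or unmap the chunk
 * according to the same strategy that registered it.
 */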
int
rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
		struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int nsegs = seg->mr_nsegs, rc;

	switch (ia->ri_memreg_strategy) {

	case RPCRDMA_ALLPHYSICAL:
		read_lock(&ia->ri_qplock);
		rpcrdma_unmap_one(ia, seg);
		read_unlock(&ia->ri_qplock);
		break;

	case RPCRDMA_FRMR:
		rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt);
		break;

	case RPCRDMA_MTHCAFMR:
		rc = rpcrdma_deregister_fmr_external(seg, ia);
		break;

	default:
		break;
	}
	return nsegs;
}

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr send_wr, *send_wr_fail;
	struct rpcrdma_rep *rep = req->rl_reply;
	int rc;

	if (rep) {
		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out;
		req->rl_reply = NULL;
	}

	send_wr.next = NULL;
	send_wr.wr_id = 0ULL;	/* no send cookie */
	send_wr.sg_list = req->rl_send_iov;
	send_wr.num_sge = req->rl_niovs;
	send_wr.opcode = IB_WR_SEND;
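	/* rl_send_iov[0] carries the transport header and [1] the RPC
	 * message; when a zero pad is in use, it rides in [3]. Each sge
	 * the adapter will read from must be synced for device access.
	 */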
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002172 if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */
2173 ib_dma_sync_single_for_device(ia->ri_id->device,
2174 req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
2175 DMA_TO_DEVICE);
2176 ib_dma_sync_single_for_device(ia->ri_id->device,
2177 req->rl_send_iov[1].addr, req->rl_send_iov[1].length,
2178 DMA_TO_DEVICE);
2179 ib_dma_sync_single_for_device(ia->ri_id->device,
2180 req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
2181 DMA_TO_DEVICE);
2182
2183 if (DECR_CQCOUNT(ep) > 0)
2184 send_wr.send_flags = 0;
2185 else { /* Provider must take a send completion every now and then */
2186 INIT_CQCOUNT(ep);
2187 send_wr.send_flags = IB_SEND_SIGNALED;
2188 }
2189
2190 rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
2191 if (rc)
2192 dprintk("RPC: %s: ib_post_send returned %i\n", __func__,
2193 rc);
2194out:
2195 return rc;
2196}
2197
2198/*
2199 * (Re)post a receive buffer.
2200 */
2201int
2202rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
2203 struct rpcrdma_ep *ep,
2204 struct rpcrdma_rep *rep)
2205{
2206 struct ib_recv_wr recv_wr, *recv_wr_fail;
2207 int rc;
2208
2209 recv_wr.next = NULL;
2210 recv_wr.wr_id = (u64) (unsigned long) rep;
Chuck Lever6b1184c2015-01-21 11:04:25 -05002211 recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002212 recv_wr.num_sge = 1;
2213
2214 ib_dma_sync_single_for_cpu(ia->ri_id->device,
Chuck Lever6b1184c2015-01-21 11:04:25 -05002215 rdmab_addr(rep->rr_rdmabuf),
2216 rdmab_length(rep->rr_rdmabuf),
2217 DMA_BIDIRECTIONAL);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002218
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002219 rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
2220
2221 if (rc)
2222 dprintk("RPC: %s: ib_post_recv returned %i\n", __func__,
2223 rc);
2224 return rc;
2225}
Chuck Lever43e95982014-07-29 17:23:34 -04002226
2227/* Physical mapping means one Read/Write list entry per-page.
2228 * All list entries must fit within an inline buffer
2229 *
2230 * NB: The server must return a Write list for NFS READ,
2231 * which has the same constraint. Factor in the inline
2232 * rsize as well.
2233 */
2234static size_t
2235rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt)
2236{
2237 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
2238 unsigned int inline_size, pages;
2239
2240 inline_size = min_t(unsigned int,
2241 cdata->inline_wsize, cdata->inline_rsize);
2242 inline_size -= RPCRDMA_HDRLEN_MIN;
2243 pages = inline_size / sizeof(struct rpcrdma_segment);
2244 return pages << PAGE_SHIFT;
2245}
2246
2247static size_t
2248rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt)
2249{
2250 return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
2251}
2252
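/* Return the upper bound, in bytes, on the RPC payload this
 * transport can convey, given its memory registration strategy.
 */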
size_t
rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt)
{
	size_t result;

	switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_ALLPHYSICAL:
		result = rpcrdma_physical_max_payload(r_xprt);
		break;
	default:
		result = rpcrdma_mr_max_payload(r_xprt);
	}
	return result;
}