\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -04001/*
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
8 * license below:
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 *
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
21 *
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
25 * permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\"Talpey, Thomas\f58851e2007-09-10 13:50:12 -040038 */
39
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -040040/*
41 * verbs.c
42 *
43 * Encapsulates the major functions managing:
44 * o adapters
45 * o endpoints
46 * o connections
47 * o buffer memory
48 */
49
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <asm/bitops.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static void rpcrdma_reset_frmrs(struct rpcrdma_ia *);
static void rpcrdma_reset_fmrs(struct rpcrdma_ia *);

/*
 * internal functions
 */

/*
 * handle replies in tasklet context, using a single, global list
 * rdma tasklet function -- just turn around and call the func
 * for all replies on the list
 */

static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
static LIST_HEAD(rpcrdma_tasklets_g);

static void
rpcrdma_run_tasklet(unsigned long data)
{
	struct rpcrdma_rep *rep;
	void (*func)(struct rpcrdma_rep *);
	unsigned long flags;

	data = data;
	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		func = rep->rr_func;
		rep->rr_func = NULL;
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		if (func)
			func(rep);
		else
			rpcrdma_recv_buffer_put(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}

static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);

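/* Human-readable strings for struct ib_event codes, used only for
 * error reporting below.
 */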
static const char * const async_event[] = {
	"CQ error",
	"QP fatal error",
	"QP request error",
	"QP access error",
	"communication established",
	"send queue drained",
	"path migration successful",
	"path mig error",
	"device fatal error",
	"port active",
	"port error",
	"LID change",
	"P_key change",
	"SM change",
	"SRQ error",
	"SRQ limit reached",
	"last WQE reached",
	"client reregister",
	"GID change",
};

#define ASYNC_MSG(status)					\
	((status) < ARRAY_SIZE(async_event) ?			\
		async_event[(status)] : "unknown async error")

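/* Splice a list of completed replies onto the global tasklet list
 * and kick the tasklet that invokes their reply handlers.
 */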
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400136static void
Chuck Leverf1a03b72014-11-08 20:14:37 -0500137rpcrdma_schedule_tasklet(struct list_head *sched_list)
138{
139 unsigned long flags;
140
141 spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
142 list_splice_tail(sched_list, &rpcrdma_tasklets_g);
143 spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
144 tasklet_schedule(&rpcrdma_tasklet_g);
145}
146
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400147static void
148rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
149{
150 struct rpcrdma_ep *ep = context;
151
Chuck Lever7ff11de2014-11-08 20:15:01 -0500152 pr_err("RPC: %s: %s on device %s ep %p\n",
153 __func__, ASYNC_MSG(event->event),
154 event->device->name, context);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400155 if (ep->rep_connected == 1) {
156 ep->rep_connected = -EIO;
Chuck Leverafadc462015-01-21 11:03:11 -0500157 rpcrdma_conn_func(ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400158 wake_up_all(&ep->rep_connect_wait);
159 }
160}
161
162static void
163rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
164{
165 struct rpcrdma_ep *ep = context;
166
Chuck Lever7ff11de2014-11-08 20:15:01 -0500167 pr_err("RPC: %s: %s on device %s ep %p\n",
168 __func__, ASYNC_MSG(event->event),
169 event->device->name, context);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400170 if (ep->rep_connected == 1) {
171 ep->rep_connected = -EIO;
Chuck Leverafadc462015-01-21 11:03:11 -0500172 rpcrdma_conn_func(ep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400173 wake_up_all(&ep->rep_connect_wait);
174 }
175}
176
Chuck Lever85024272015-01-21 11:02:04 -0500177static const char * const wc_status[] = {
178 "success",
179 "local length error",
180 "local QP operation error",
181 "local EE context operation error",
182 "local protection error",
183 "WR flushed",
184 "memory management operation error",
185 "bad response error",
186 "local access error",
187 "remote invalid request error",
188 "remote access error",
189 "remote operation error",
190 "transport retry counter exceeded",
191 "RNR retrycounter exceeded",
192 "local RDD violation error",
193 "remove invalid RD request",
194 "operation aborted",
195 "invalid EE context number",
196 "invalid EE context state",
197 "fatal error",
198 "response timeout error",
199 "general error",
200};
201
202#define COMPLETION_MSG(status) \
203 ((status) < ARRAY_SIZE(wc_status) ? \
204 wc_status[(status)] : "unexpected completion error")
205
Chuck Leverfc664482014-05-28 10:33:25 -0400206static void
207rpcrdma_sendcq_process_wc(struct ib_wc *wc)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400208{
Chuck Lever85024272015-01-21 11:02:04 -0500209 if (likely(wc->status == IB_WC_SUCCESS))
Chuck Leverfc664482014-05-28 10:33:25 -0400210 return;
Chuck Lever85024272015-01-21 11:02:04 -0500211
212 /* WARNING: Only wr_id and status are reliable at this point */
213 if (wc->wr_id == 0ULL) {
214 if (wc->status != IB_WC_WR_FLUSH_ERR)
215 pr_err("RPC: %s: SEND: %s\n",
216 __func__, COMPLETION_MSG(wc->status));
217 } else {
218 struct rpcrdma_mw *r;
219
220 r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
221 r->r.frmr.fr_state = FRMR_IS_STALE;
222 pr_err("RPC: %s: frmr %p (stale): %s\n",
223 __func__, r, COMPLETION_MSG(wc->status));
224 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -0400225}
226
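/* Drain the send CQ in batches of RPCRDMA_POLLSIZE completions, up to
 * a fixed budget, so that one upcall cannot monopolize the CPU.
 */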
static int
rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_send_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_sendcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}

/*
 * Handle send, fast_reg_mr, and local_inv completions.
 *
 * Send events are typically suppressed and thus do not result
 * in an upcall. Occasionally one is signaled, however. This
 * prevents the provider's completion queue from wrapping and
 * losing a completion.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_sendcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC: %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_sendcq_poll(cq, ep);
}

static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
				   rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE);
	prefetch(rep->rr_base);

out_schedule:
	list_add_tail(&rep->rr_list, sched_list);
	return;
out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("RPC: %s: rep %p: %s\n",
		       __func__, rep, COMPLETION_MSG(wc->status));
	rep->rr_len = ~0U;
	goto out_schedule;
}

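/* Poll the receive CQ in batches, collecting completed replies on a
 * local list that is handed to the reply tasklet in one operation.
 */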
static int
rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct list_head sched_list;
	struct ib_wc *wcs;
	int budget, count, rc;

	INIT_LIST_HEAD(&sched_list);
	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_recv_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			goto out_schedule;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++, &sched_list);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	rc = 0;

out_schedule:
	rpcrdma_schedule_tasklet(&sched_list);
	return rc;
}

/*
 * Handle receive completions.
 *
 * It is reentrant but processes single events in order to maintain
 * ordering of receives to keep server credits.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_recvcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC: %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_recvcq_poll(cq, ep);
}

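/* Drain both completion queues, dispatching any replies already
 * delivered, before disconnecting or destroying the endpoint.
 */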
static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
	struct ib_wc wc;
	LIST_HEAD(sched_list);

	while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
		rpcrdma_recvcq_process_wc(&wc, &sched_list);
	if (!list_empty(&sched_list))
		rpcrdma_schedule_tasklet(&sched_list);
	while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
		rpcrdma_sendcq_process_wc(&wc);
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char * const conn[] = {
	"address resolved",
	"address error",
	"route resolved",
	"route error",
	"connect request",
	"connect response",
	"connect error",
	"unreachable",
	"rejected",
	"established",
	"disconnected",
	"device removal",
	"multicast join",
	"multicast error",
	"address change",
	"timewait exit",
};

#define CONNECTION_MSG(status)					\
	((status) < ARRAY_SIZE(conn) ?				\
		conn[(status)] : "unrecognized connection error")
#endif

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC: %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC: %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC: %s: %pI4:%u (ep 0x%p): %s\n",
			__func__, &addr->sin_addr.s_addr,
			ntohs(addr->sin_port), ep,
			CONNECTION_MSG(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;
		printk(KERN_INFO "rpcrdma: connection to %pI4:%u "
			"on %s, memreg %d slots %d ird %d%s\n",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			ia->ri_id->device->name,
			ia->ri_memreg_strategy,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		printk(KERN_INFO "rpcrdma: connection to %pI4:%u closed (%d)\n",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			connstate);
	}
#endif

	return 0;
}

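/* Create an RDMA CM ID for this connection and resolve the server's
 * address and route, waiting (with a timeout) for each asynchronous
 * CM step to complete.
 */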
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC: %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int count = 0;

	while (1 == ib_poll_cq(cq, 1, &wc))
		++count;

	if (count)
		dprintk("RPC: %s: flushed %d events (last 0x%x)\n",
			__func__, count, wc.opcode);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	int rc, mem_priv;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct ib_device_attr *devattr = &ia->ri_devattr;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_query_device(ia->ri_id->device, devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out3;
	}

	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
		ia->ri_have_dma_lkey = 1;
		ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
	}

	if (memreg == RPCRDMA_FRMR) {
		/* Requires both frmr reg and local dma lkey */
		if ((devattr->device_cap_flags &
		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
			dprintk("RPC: %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		} else {
			/* Mind the ia limit on FRMR page list depth */
			ia->ri_max_frmr_depth = min_t(unsigned int,
				RPCRDMA_MAX_DATA_SEGS,
				devattr->max_fast_reg_page_list_len);
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_id->device->alloc_fmr) {
			dprintk("RPC: %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_ALLPHYSICAL;
		}
	}

	/*
	 * Optionally obtain an underlying physical identity mapping in
	 * order to do a memory window-based bind. This base registration
	 * is protected from remote access - that is enabled only by binding
	 * for the specific bytes targeted during each RPC operation, and
	 * revoked after the corresponding completion similar to a storage
	 * adapter.
	 */
	switch (memreg) {
	case RPCRDMA_FRMR:
		break;
	case RPCRDMA_ALLPHYSICAL:
		mem_priv = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_WRITE |
				IB_ACCESS_REMOTE_READ;
		goto register_setup;
	case RPCRDMA_MTHCAFMR:
		if (ia->ri_have_dma_lkey)
			break;
		mem_priv = IB_ACCESS_LOCAL_WRITE;
	register_setup:
		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
		if (IS_ERR(ia->ri_bind_mem)) {
			printk(KERN_ALERT "%s: ib_get_dma_mr for "
				"phys register failed with %lX\n",
				__func__, PTR_ERR(ia->ri_bind_mem));
			rc = -ENOMEM;
			goto out3;
		}
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out3;
	}
	dprintk("RPC: %s: memory registration strategy is %d\n",
		__func__, memreg);

	/* Else will do memory reg/dereg for each chunk */
	ia->ri_memreg_strategy = memreg;

	rwlock_init(&ia->ri_qplock);
	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

/*
 * Clean up/close an IA.
 *  o if event handles and PD have been initialized, free them.
 *  o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC: %s: entering\n", __func__);
	if (ia->ri_bind_mem != NULL) {
		rc = ib_dereg_mr(ia->ri_bind_mem);
		dprintk("RPC: %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}
	if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) {
		rc = ib_dealloc_pd(ia->ri_pd);
		dprintk("RPC: %s: ib_dealloc_pd returned %i\n",
			__func__, rc);
	}
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
				struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	struct ib_cq *sendcq, *recvcq;
	int rc, err;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > devattr->max_qp_wr)
		cdata->max_requests = devattr->max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	/* send_cq and recv_cq initialized below */
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR: {
		int depth = 7;

		/* Add room for frmr register and invalidate WRs.
		 * 1. FRMR reg WR for head
		 * 2. FRMR invalidate WR for head
		 * 3. N FRMR reg WRs for pagelist
		 * 4. N FRMR invalidate WRs for pagelist
		 * 5. FRMR reg WR for tail
		 * 6. FRMR invalidate WR for tail
		 * 7. The RDMA_SEND WR
		 */

		/* Calculate N if the device max FRMR depth is smaller than
		 * RPCRDMA_MAX_DATA_SEGS.
		 */
		if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
			int delta = RPCRDMA_MAX_DATA_SEGS -
				    ia->ri_max_frmr_depth;

			do {
				depth += 2; /* FRMR reg + invalidate */
				delta -= ia->ri_max_frmr_depth;
			} while (delta > 0);

		}
		ep->rep_attr.cap.max_send_wr *= depth;
		if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
			cdata->max_requests = devattr->max_qp_wr / depth;
			if (!cdata->max_requests)
				return -EINVAL;
			ep->rep_attr.cap.max_send_wr = cdata->max_requests *
						       depth;
		}
		break;
	}
	default:
		break;
	}
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS)
		ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS;
	else if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;
	INIT_CQCOUNT(ep);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep,
			      ep->rep_attr.cap.max_send_wr + 1, 0);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep,
			      ep->rep_attr.cap.max_recv_wr + 1, 0);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		ib_destroy_cq(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (devattr->max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						devattr->max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(sendcq);
	if (err)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC: %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	/* padding - could be done in rpcrdma_buffer_destroy... */
	if (ep->rep_pad_mr) {
		rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad);
		ep->rep_pad_mr = NULL;
	}

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
	if (rc)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	rpcrdma_clean_cq(ep->rep_attr.send_cq);
	rc = ib_destroy_cq(ep->rep_attr.send_cq);
	if (rc)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, rc);
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC: %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);
		rpcrdma_flush_cqs(ep);

		switch (ia->ri_memreg_strategy) {
		case RPCRDMA_FRMR:
			rpcrdma_reset_frmrs(ia);
			break;
		case RPCRDMA_MTHCAFMR:
			rpcrdma_reset_fmrs(ia);
			break;
		case RPCRDMA_ALLPHYSICAL:
			break;
		default:
			rc = -EIO;
			goto out;
		}

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_id->device != id->device) {
			printk("RPC: %s: can't reconnect on "
				"different device!\n", __func__);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		write_lock(&ia->ri_qplock);
		old = ia->ri_id;
		ia->ri_id = id;
		write_unlock(&ia->ri_qplock);

		rdma_destroy_qp(old);
		rdma_destroy_id(old);
	} else {
		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC: %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition which has already
	 * undergone a best-effort.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		dprintk("RPC: %s: connected\n", __func__);
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_flush_cqs(ep);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
		dprintk("RPC: %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
}

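/* Allocate a bare rpcrdma_req and point it at this transport's
 * buffer pool.
 */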
static struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->rl_buffer = &r_xprt->rx_buf;
	return req;
}

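/* Allocate a reply buffer large enough for the negotiated inline
 * reply size, rounded up to a power of two, and register it with
 * the adapter.
 */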
static struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	size_t rlen = 1 << fls(cdata->inline_rsize +
			       sizeof(struct rpcrdma_rep));
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kmalloc(rlen, GFP_KERNEL);
	if (rep == NULL)
		goto out;
	memset(rep, 0, sizeof(*rep));

	rc = rpcrdma_register_internal(ia, rep->rr_base, rlen -
				       offsetof(struct rpcrdma_rep, rr_base),
				       &rep->rr_handle, &rep->rr_iov);
	if (rc)
		goto out_free;

	rep->rr_buffer = &r_xprt->rx_buf;
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

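/* Pre-allocate the MW pool: (rb_max_requests + 1) * RPCRDMA_MAX_SEGS
 * FMRs, linked on rb_mws for allocation and on rb_all for teardown.
 */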
static int
rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
{
	int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
	struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_DATA_SEGS,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};
	struct rpcrdma_mw *r;
	int i, rc;

	i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
	dprintk("RPC: %s: initializing %d FMRs\n", __func__, i);

	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (r == NULL)
			return -ENOMEM;

		r->r.fmr = ib_alloc_fmr(ia->ri_pd, mr_access_flags, &fmr_attr);
		if (IS_ERR(r->r.fmr)) {
			rc = PTR_ERR(r->r.fmr);
			dprintk("RPC: %s: ib_alloc_fmr failed %i\n",
				__func__, rc);
			goto out_free;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}
	return 0;

out_free:
	kfree(r);
	return rc;
}

static int
rpcrdma_init_frmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf)
{
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *r;
	int i, rc;

	i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
	dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);

	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (r == NULL)
			return -ENOMEM;
		f = &r->r.frmr;

		f->fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
						ia->ri_max_frmr_depth);
		if (IS_ERR(f->fr_mr)) {
			rc = PTR_ERR(f->fr_mr);
			dprintk("RPC: %s: ib_alloc_fast_reg_mr "
				"failed %i\n", __func__, rc);
			goto out_free;
		}

		f->fr_pgl = ib_alloc_fast_reg_page_list(ia->ri_id->device,
							ia->ri_max_frmr_depth);
		if (IS_ERR(f->fr_pgl)) {
			rc = PTR_ERR(f->fr_pgl);
			dprintk("RPC: %s: ib_alloc_fast_reg_page_list "
				"failed %i\n", __func__, rc);

			ib_dereg_mr(f->fr_mr);
			goto out_free;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}

	return 0;

out_free:
	kfree(r);
	return rc;
}

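/* Allocate the transport's buffer pool: the request and reply pointer
 * arrays, the optional zeroed pad buffer, the MW pool, and one
 * req/rep pair for each credit.
 */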
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001207int
Chuck Leverac920d02015-01-21 11:03:44 -05001208rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001209{
Chuck Leverac920d02015-01-21 11:03:44 -05001210 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1211 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
1212 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001213 char *p;
Chuck Lever13924022015-01-21 11:03:52 -05001214 size_t len;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001215 int i, rc;
1216
1217 buf->rb_max_requests = cdata->max_requests;
1218 spin_lock_init(&buf->rb_lock);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001219
1220 /* Need to allocate:
1221 * 1. arrays for send and recv pointers
1222 * 2. arrays of struct rpcrdma_req to fill in pointers
1223 * 3. array of struct rpcrdma_rep for replies
1224 * 4. padding, if any
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001225 * Send/recv buffers in req/rep need to be registered
1226 */
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001227 len = buf->rb_max_requests *
1228 (sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
1229 len += cdata->padding;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001230
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001231 p = kzalloc(len, GFP_KERNEL);
1232 if (p == NULL) {
1233 dprintk("RPC: %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
1234 __func__, len);
1235 rc = -ENOMEM;
1236 goto out;
1237 }
1238 buf->rb_pool = p; /* for freeing it later */
1239
1240 buf->rb_send_bufs = (struct rpcrdma_req **) p;
1241 p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
1242 buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
1243 p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];
1244
1245 /*
1246 * Register the zeroed pad buffer, if any.
1247 */
1248 if (cdata->padding) {
Chuck Leverac920d02015-01-21 11:03:44 -05001249 struct rpcrdma_ep *ep = &r_xprt->rx_ep;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001250 rc = rpcrdma_register_internal(ia, p, cdata->padding,
1251 &ep->rep_pad_mr, &ep->rep_pad);
1252 if (rc)
1253 goto out;
1254 }
1255 p += cdata->padding;
1256
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001257 INIT_LIST_HEAD(&buf->rb_mws);
Chuck Lever3111d722014-07-29 17:24:28 -04001258 INIT_LIST_HEAD(&buf->rb_all);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001259 switch (ia->ri_memreg_strategy) {
Tom Talpey3197d3092008-10-09 15:00:20 -04001260 case RPCRDMA_FRMR:
Chuck Lever2e845222014-07-29 17:25:38 -04001261 rc = rpcrdma_init_frmrs(ia, buf);
1262 if (rc)
1263 goto out;
Tom Talpey3197d3092008-10-09 15:00:20 -04001264 break;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001265 case RPCRDMA_MTHCAFMR:
Chuck Lever2e845222014-07-29 17:25:38 -04001266 rc = rpcrdma_init_fmrs(ia, buf);
1267 if (rc)
1268 goto out;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001269 break;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001270 default:
1271 break;
1272 }
1273
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001274 for (i = 0; i < buf->rb_max_requests; i++) {
1275 struct rpcrdma_req *req;
1276 struct rpcrdma_rep *rep;
1277
Chuck Lever13924022015-01-21 11:03:52 -05001278 req = rpcrdma_create_req(r_xprt);
1279 if (IS_ERR(req)) {
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001280 dprintk("RPC: %s: request buffer %d alloc"
1281 " failed\n", __func__, i);
Chuck Lever13924022015-01-21 11:03:52 -05001282 rc = PTR_ERR(req);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001283 goto out;
1284 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001285 buf->rb_send_bufs[i] = req;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001286
Chuck Lever13924022015-01-21 11:03:52 -05001287 rep = rpcrdma_create_rep(r_xprt);
1288 if (IS_ERR(rep)) {
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001289 dprintk("RPC: %s: reply buffer %d alloc failed\n",
1290 __func__, i);
Chuck Lever13924022015-01-21 11:03:52 -05001291 rc = PTR_ERR(rep);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001292 goto out;
1293 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001294 buf->rb_recv_bufs[i] = rep;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001295 }
Chuck Lever13924022015-01-21 11:03:52 -05001296
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001297 return 0;
1298out:
1299 rpcrdma_buffer_destroy(buf);
1300 return rc;
1301}
1302
Chuck Lever2e845222014-07-29 17:25:38 -04001303static void
Chuck Lever13924022015-01-21 11:03:52 -05001304rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
1305{
1306 if (!rep)
1307 return;
1308
1309 rpcrdma_deregister_internal(ia, rep->rr_handle, &rep->rr_iov);
1310 kfree(rep);
1311}
1312
1313static void
1314rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
1315{
1316 if (!req)
1317 return;
1318
Chuck Lever0ca77dc2015-01-21 11:04:08 -05001319 rpcrdma_free_regbuf(ia, req->rl_sendbuf);
Chuck Lever85275c82015-01-21 11:04:16 -05001320 rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
Chuck Lever13924022015-01-21 11:03:52 -05001321 kfree(req);
1322}
1323
1324static void
Chuck Lever2e845222014-07-29 17:25:38 -04001325rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf)
1326{
1327 struct rpcrdma_mw *r;
1328 int rc;
1329
1330 while (!list_empty(&buf->rb_all)) {
1331 r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
1332 list_del(&r->mw_all);
1333 list_del(&r->mw_list);
1334
1335 rc = ib_dealloc_fmr(r->r.fmr);
1336 if (rc)
1337 dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
1338 __func__, rc);
1339
1340 kfree(r);
1341 }
1342}
1343
1344static void
1345rpcrdma_destroy_frmrs(struct rpcrdma_buffer *buf)
1346{
1347 struct rpcrdma_mw *r;
1348 int rc;
1349
1350 while (!list_empty(&buf->rb_all)) {
1351 r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
1352 list_del(&r->mw_all);
1353 list_del(&r->mw_list);
1354
1355 rc = ib_dereg_mr(r->r.frmr.fr_mr);
1356 if (rc)
1357 dprintk("RPC: %s: ib_dereg_mr failed %i\n",
1358 __func__, rc);
1359 ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
1360
1361 kfree(r);
1362 }
1363}
1364
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001365void
1366rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
1367{
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001368 struct rpcrdma_ia *ia = rdmab_to_ia(buf);
Chuck Lever2e845222014-07-29 17:25:38 -04001369 int i;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001370
1371 /* clean up in reverse order from create
1372 * 1. recv mr memory (mr free, then kfree)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001373 * 2. send mr memory (mr free, then kfree)
Chuck Lever2e845222014-07-29 17:25:38 -04001374 * 3. MWs
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001375 */
1376 dprintk("RPC: %s: entering\n", __func__);
1377
1378 for (i = 0; i < buf->rb_max_requests; i++) {
Chuck Lever13924022015-01-21 11:03:52 -05001379 if (buf->rb_recv_bufs)
1380 rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]);
1381 if (buf->rb_send_bufs)
1382 rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001383 }
1384
Chuck Lever2e845222014-07-29 17:25:38 -04001385 switch (ia->ri_memreg_strategy) {
1386 case RPCRDMA_FRMR:
1387 rpcrdma_destroy_frmrs(buf);
1388 break;
1389 case RPCRDMA_MTHCAFMR:
1390 rpcrdma_destroy_fmrs(buf);
1391 break;
1392 default:
1393 break;
Allen Andrews4034ba02014-05-28 10:32:09 -04001394 }
1395
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001396 kfree(buf->rb_pool);
1397}
1398
Chuck Lever467c9672014-11-08 20:14:29 -05001399/* After a disconnect, unmap all FMRs.
1400 *
1401 * This is invoked only in the transport connect worker in order
1402 * to serialize with rpcrdma_register_fmr_external().
1403 */
1404static void
1405rpcrdma_reset_fmrs(struct rpcrdma_ia *ia)
1406{
1407 struct rpcrdma_xprt *r_xprt =
1408 container_of(ia, struct rpcrdma_xprt, rx_ia);
1409 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1410 struct list_head *pos;
1411 struct rpcrdma_mw *r;
1412 LIST_HEAD(l);
1413 int rc;
1414
1415 list_for_each(pos, &buf->rb_all) {
1416 r = list_entry(pos, struct rpcrdma_mw, mw_all);
1417
1418 INIT_LIST_HEAD(&l);
1419 list_add(&r->r.fmr->list, &l);
1420 rc = ib_unmap_fmr(&l);
1421 if (rc)
1422 dprintk("RPC: %s: ib_unmap_fmr failed %i\n",
1423 __func__, rc);
1424 }
1425}
1426
Chuck Lever9f9d8022014-07-29 17:24:45 -04001427/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
1428 * an unusable state. Find FRMRs in this state and dereg / reg
1429 * each. FRMRs that are VALID and attached to an rpcrdma_req are
1430 * also torn down.
1431 *
1432 * This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
1433 *
1434 * This is invoked only in the transport connect worker in order
1435 * to serialize with rpcrdma_register_frmr_external().
1436 */
1437static void
1438rpcrdma_reset_frmrs(struct rpcrdma_ia *ia)
1439{
1440 struct rpcrdma_xprt *r_xprt =
1441 container_of(ia, struct rpcrdma_xprt, rx_ia);
1442 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1443 struct list_head *pos;
1444 struct rpcrdma_mw *r;
1445 int rc;
1446
1447 list_for_each(pos, &buf->rb_all) {
1448 r = list_entry(pos, struct rpcrdma_mw, mw_all);
1449
1450 if (r->r.frmr.fr_state == FRMR_IS_INVALID)
1451 continue;
1452
1453 rc = ib_dereg_mr(r->r.frmr.fr_mr);
1454 if (rc)
1455 dprintk("RPC: %s: ib_dereg_mr failed %i\n",
1456 __func__, rc);
1457 ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
1458
1459 r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
1460 ia->ri_max_frmr_depth);
1461 if (IS_ERR(r->r.frmr.fr_mr)) {
1462 rc = PTR_ERR(r->r.frmr.fr_mr);
1463 dprintk("RPC: %s: ib_alloc_fast_reg_mr"
1464 " failed %i\n", __func__, rc);
1465 continue;
1466 }
1467 r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list(
1468 ia->ri_id->device,
1469 ia->ri_max_frmr_depth);
1470 if (IS_ERR(r->r.frmr.fr_pgl)) {
1471 rc = PTR_ERR(r->r.frmr.fr_pgl);
1472 dprintk("RPC: %s: "
1473 "ib_alloc_fast_reg_page_list "
1474 "failed %i\n", __func__, rc);
1475
1476 ib_dereg_mr(r->r.frmr.fr_mr);
1477 continue;
1478 }
1479 r->r.frmr.fr_state = FRMR_IS_INVALID;
1480 }
1481}
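
/* Note on rpcrdma_reset_frmrs() above: deregistering and re-allocating
 * fr_mr is what produces the fresh rkey. If either allocation fails,
 * the loop simply moves on, leaving that MW without a usable
 * fr_mr/fr_pgl.
 */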
1482
Chuck Leverc2922c02014-07-29 17:24:36 -04001483/* "*mw" can be NULL when rpcrdma_buffer_get_frmrs() or
1484 * rpcrdma_buffer_get_fmrs() fails, leaving some req segments uninitialized.
1485 */
1486static void
1487rpcrdma_buffer_put_mr(struct rpcrdma_mw **mw, struct rpcrdma_buffer *buf)
1488{
1489 if (*mw) {
1490 list_add_tail(&(*mw)->mw_list, &buf->rb_mws);
1491 *mw = NULL;
1492 }
1493}
1494
1495/* Cycle MWs back in reverse order, and "spin" them.
1496 * This delays and scrambles reuse as much as possible.
1497 */
1498static void
1499rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
1500{
1501 struct rpcrdma_mr_seg *seg = req->rl_segments;
1502 struct rpcrdma_mr_seg *seg1 = seg;
1503 int i;
1504
1505 for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++)
Chuck Lever3eb35812015-01-21 11:02:54 -05001506 rpcrdma_buffer_put_mr(&seg->rl_mw, buf);
1507 rpcrdma_buffer_put_mr(&seg1->rl_mw, buf);
Chuck Leverc2922c02014-07-29 17:24:36 -04001508}
1509
1510static void
1511rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
1512{
1513 buf->rb_send_bufs[--buf->rb_send_index] = req;
1514 req->rl_niovs = 0;
1515 if (req->rl_reply) {
1516 buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
1517 req->rl_reply->rr_func = NULL;
1518 req->rl_reply = NULL;
1519 }
1520}
1521
Chuck Leverddb6beb2014-07-29 17:24:54 -04001522/* rpcrdma_unmap_one() was already done by rpcrdma_deregister_frmr_external().
1523 * Redo only the ib_post_send().
1524 */
1525static void
1526rpcrdma_retry_local_inv(struct rpcrdma_mw *r, struct rpcrdma_ia *ia)
1527{
1528 struct rpcrdma_xprt *r_xprt =
1529 container_of(ia, struct rpcrdma_xprt, rx_ia);
1530 struct ib_send_wr invalidate_wr, *bad_wr;
1531 int rc;
1532
1533 dprintk("RPC: %s: FRMR %p is stale\n", __func__, r);
1534
1535 /* When this FRMR is re-inserted into rb_mws, it is no longer stale */
Chuck Leverdab7e3b2014-07-29 17:25:20 -04001536 r->r.frmr.fr_state = FRMR_IS_INVALID;
Chuck Leverddb6beb2014-07-29 17:24:54 -04001537
1538 memset(&invalidate_wr, 0, sizeof(invalidate_wr));
1539 invalidate_wr.wr_id = (unsigned long)(void *)r;
1540 invalidate_wr.opcode = IB_WR_LOCAL_INV;
Chuck Leverddb6beb2014-07-29 17:24:54 -04001541 invalidate_wr.ex.invalidate_rkey = r->r.frmr.fr_mr->rkey;
1542 DECR_CQCOUNT(&r_xprt->rx_ep);
1543
1544 dprintk("RPC: %s: frmr %p invalidating rkey %08x\n",
1545 __func__, r, r->r.frmr.fr_mr->rkey);
1546
1547 read_lock(&ia->ri_qplock);
1548 rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
1549 read_unlock(&ia->ri_qplock);
1550 if (rc) {
1551 /* Force rpcrdma_buffer_get() to retry */
1552 r->r.frmr.fr_state = FRMR_IS_STALE;
1553 dprintk("RPC: %s: ib_post_send failed, %i\n",
1554 __func__, rc);
1555 }
1556}
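
/* The LOCAL_INV above is posted under the ri_qplock read lock,
 * presumably so it cannot race with the connect worker replacing the
 * QP. Marking the FRMR FRMR_IS_STALE when the post fails makes
 * rpcrdma_buffer_get_frmrs() divert it to the stale list again on the
 * next pass, so the invalidate is eventually retried.
 */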
1557
1558static void
1559rpcrdma_retry_flushed_linv(struct list_head *stale,
1560 struct rpcrdma_buffer *buf)
1561{
1562 struct rpcrdma_ia *ia = rdmab_to_ia(buf);
1563 struct list_head *pos;
1564 struct rpcrdma_mw *r;
1565 unsigned long flags;
1566
1567 list_for_each(pos, stale) {
1568 r = list_entry(pos, struct rpcrdma_mw, mw_list);
1569 rpcrdma_retry_local_inv(r, ia);
1570 }
1571
1572 spin_lock_irqsave(&buf->rb_lock, flags);
1573 list_splice_tail(stale, &buf->rb_mws);
1574 spin_unlock_irqrestore(&buf->rb_lock, flags);
1575}
1576
Chuck Leverc2922c02014-07-29 17:24:36 -04001577static struct rpcrdma_req *
Chuck Leverddb6beb2014-07-29 17:24:54 -04001578rpcrdma_buffer_get_frmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf,
1579 struct list_head *stale)
1580{
1581 struct rpcrdma_mw *r;
1582 int i;
1583
1584 i = RPCRDMA_MAX_SEGS - 1;
1585 while (!list_empty(&buf->rb_mws)) {
1586 r = list_entry(buf->rb_mws.next,
1587 struct rpcrdma_mw, mw_list);
1588 list_del(&r->mw_list);
1589 if (r->r.frmr.fr_state == FRMR_IS_STALE) {
1590 list_add(&r->mw_list, stale);
1591 continue;
1592 }
Chuck Lever3eb35812015-01-21 11:02:54 -05001593 req->rl_segments[i].rl_mw = r;
Chuck Leverddb6beb2014-07-29 17:24:54 -04001594 if (unlikely(i-- == 0))
1595 return req; /* Success */
1596 }
1597
1598 /* Not enough entries on rb_mws for this req */
1599 rpcrdma_buffer_put_sendbuf(req, buf);
1600 rpcrdma_buffer_put_mrs(req, buf);
1601 return NULL;
1602}
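
/* Note: rl_segments[] is filled from the tail (RPCRDMA_MAX_SEGS - 1)
 * down to 0, so the request succeeds only when a full complement of
 * usable FRMRs has been pulled off rb_mws. Stale FRMRs found along the
 * way are parked on the caller's "stale" list for recovery.
 */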
1603
1604static struct rpcrdma_req *
1605rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
Chuck Leverc2922c02014-07-29 17:24:36 -04001606{
1607 struct rpcrdma_mw *r;
1608 int i;
1609
1610 i = RPCRDMA_MAX_SEGS - 1;
1611 while (!list_empty(&buf->rb_mws)) {
1612 r = list_entry(buf->rb_mws.next,
1613 struct rpcrdma_mw, mw_list);
1614 list_del(&r->mw_list);
Chuck Lever3eb35812015-01-21 11:02:54 -05001615 req->rl_segments[i].rl_mw = r;
Chuck Leverc2922c02014-07-29 17:24:36 -04001616 if (unlikely(i-- == 0))
1617 return req; /* Success */
1618 }
1619
1620 /* Not enough entries on rb_mws for this req */
1621 rpcrdma_buffer_put_sendbuf(req, buf);
1622 rpcrdma_buffer_put_mrs(req, buf);
1623 return NULL;
1624}
1625
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001626/*
1627 * Get a set of request/reply buffers.
1628 *
1629 * Reply buffer (if needed) is attached to send buffer upon return.
1630 * Rule:
1631 * rb_send_index and rb_recv_index MUST always be pointing to the
1632 * *next* available buffer (non-NULL). They are incremented after
1633 * removing buffers, and decremented *before* returning them.
1634 */
1635struct rpcrdma_req *
1636rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
1637{
Chuck Leverc2922c02014-07-29 17:24:36 -04001638 struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
Chuck Leverddb6beb2014-07-29 17:24:54 -04001639 struct list_head stale;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001640 struct rpcrdma_req *req;
1641 unsigned long flags;
1642
1643 spin_lock_irqsave(&buffers->rb_lock, flags);
1644 if (buffers->rb_send_index == buffers->rb_max_requests) {
1645 spin_unlock_irqrestore(&buffers->rb_lock, flags);
1646 dprintk("RPC: %s: out of request buffers\n", __func__);
1647 return NULL;
1648 }
1649
1650 req = buffers->rb_send_bufs[buffers->rb_send_index];
1651 if (buffers->rb_send_index < buffers->rb_recv_index) {
1652 dprintk("RPC: %s: %d extra receives outstanding (ok)\n",
1653 __func__,
1654 buffers->rb_recv_index - buffers->rb_send_index);
1655 req->rl_reply = NULL;
1656 } else {
1657 req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
1658 buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
1659 }
1660 buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;
Chuck Leverddb6beb2014-07-29 17:24:54 -04001661
1662 INIT_LIST_HEAD(&stale);
Chuck Leverc2922c02014-07-29 17:24:36 -04001663 switch (ia->ri_memreg_strategy) {
1664 case RPCRDMA_FRMR:
Chuck Leverddb6beb2014-07-29 17:24:54 -04001665 req = rpcrdma_buffer_get_frmrs(req, buffers, &stale);
1666 break;
Chuck Leverc2922c02014-07-29 17:24:36 -04001667 case RPCRDMA_MTHCAFMR:
Chuck Leverddb6beb2014-07-29 17:24:54 -04001668 req = rpcrdma_buffer_get_fmrs(req, buffers);
Chuck Leverc2922c02014-07-29 17:24:36 -04001669 break;
1670 default:
1671 break;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001672 }
1673 spin_unlock_irqrestore(&buffers->rb_lock, flags);
Chuck Leverddb6beb2014-07-29 17:24:54 -04001674 if (!list_empty(&stale))
1675 rpcrdma_retry_flushed_linv(&stale, buffers);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001676 return req;
1677}
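
/* A rough usage sketch (names illustrative, error handling elided;
 * the real callers live in the transport code):
 *
 *	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
 *	if (req == NULL)
 *		return NULL;		(pool exhausted, caller backs off)
 *	... marshal and post the RPC using req ...
 *	rpcrdma_buffer_put(req);	(when the request is retired)
 */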
1678
1679/*
1680 * Put request/reply buffers back into pool.
1681 * Pre-decrement counter/array index.
1682 */
1683void
1684rpcrdma_buffer_put(struct rpcrdma_req *req)
1685{
1686 struct rpcrdma_buffer *buffers = req->rl_buffer;
1687 struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001688 unsigned long flags;
1689
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001690 spin_lock_irqsave(&buffers->rb_lock, flags);
Chuck Leverc2922c02014-07-29 17:24:36 -04001691 rpcrdma_buffer_put_sendbuf(req, buffers);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001692 switch (ia->ri_memreg_strategy) {
Tom Talpey3197d3092008-10-09 15:00:20 -04001693 case RPCRDMA_FRMR:
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001694 case RPCRDMA_MTHCAFMR:
Chuck Leverc2922c02014-07-29 17:24:36 -04001695 rpcrdma_buffer_put_mrs(req, buffers);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001696 break;
1697 default:
1698 break;
1699 }
1700 spin_unlock_irqrestore(&buffers->rb_lock, flags);
1701}
1702
1703/*
1704 * Recover reply buffers from pool.
1705 * This happens when recovering from error conditions.
1706 * Post-increment counter/array index.
1707 */
1708void
1709rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
1710{
1711 struct rpcrdma_buffer *buffers = req->rl_buffer;
1712 unsigned long flags;
1713
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001714 spin_lock_irqsave(&buffers->rb_lock, flags);
1715 if (buffers->rb_recv_index < buffers->rb_max_requests) {
1716 req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
1717 buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
1718 }
1719 spin_unlock_irqrestore(&buffers->rb_lock, flags);
1720}
1721
1722/*
1723 * Put reply buffers back into pool when not attached to
Chuck Leverb45ccfd2014-05-28 10:32:34 -04001724 * request. This happens in error conditions.
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001725 */
1726void
1727rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
1728{
1729 struct rpcrdma_buffer *buffers = rep->rr_buffer;
1730 unsigned long flags;
1731
1732 rep->rr_func = NULL;
1733 spin_lock_irqsave(&buffers->rb_lock, flags);
1734 buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
1735 spin_unlock_irqrestore(&buffers->rb_lock, flags);
1736}
1737
1738/*
1739 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
1740 */
1741
1742int
1743rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
1744 struct ib_mr **mrp, struct ib_sge *iov)
1745{
1746 struct ib_phys_buf ipb;
1747 struct ib_mr *mr;
1748 int rc;
1749
1750 /*
1751 * All memory passed here was kmalloc'ed, therefore physically contiguous.
1752 */
1753 iov->addr = ib_dma_map_single(ia->ri_id->device,
1754 va, len, DMA_BIDIRECTIONAL);
Yan Burmanbf858ab2014-06-19 16:06:30 +03001755 if (ib_dma_mapping_error(ia->ri_id->device, iov->addr))
1756 return -ENOMEM;
1757
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001758 iov->length = len;
1759
Tom Talpeybd7ed1d2008-10-09 15:00:09 -04001760 if (ia->ri_have_dma_lkey) {
1761 *mrp = NULL;
1762 iov->lkey = ia->ri_dma_lkey;
1763 return 0;
1764 } else if (ia->ri_bind_mem != NULL) {
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001765 *mrp = NULL;
1766 iov->lkey = ia->ri_bind_mem->lkey;
1767 return 0;
1768 }
1769
1770 ipb.addr = iov->addr;
1771 ipb.size = iov->length;
1772 mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
1773 IB_ACCESS_LOCAL_WRITE, &iov->addr);
1774
1775 dprintk("RPC: %s: phys convert: 0x%llx "
1776 "registered 0x%llx length %d\n",
Andrew Mortona56daeb2007-10-16 01:29:57 -07001777 __func__, (unsigned long long)ipb.addr,
1778 (unsigned long long)iov->addr, len);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001779
1780 if (IS_ERR(mr)) {
1781 *mrp = NULL;
1782 rc = PTR_ERR(mr);
1783 dprintk("RPC: %s: failed with %i\n", __func__, rc);
1784 } else {
1785 *mrp = mr;
1786 iov->lkey = mr->lkey;
1787 rc = 0;
1788 }
1789
1790 return rc;
1791}
1792
1793int
1794rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
1795 struct ib_mr *mr, struct ib_sge *iov)
1796{
1797 int rc;
1798
1799 ib_dma_unmap_single(ia->ri_id->device,
1800 iov->addr, iov->length, DMA_BIDIRECTIONAL);
1801
1802 if (mr == NULL)
1803 return 0;
1804
1805 rc = ib_dereg_mr(mr);
1806 if (rc)
1807 dprintk("RPC: %s: ib_dereg_mr failed %i\n", __func__, rc);
1808 return rc;
1809}
1810
Chuck Lever9128c3e2015-01-21 11:04:00 -05001811/**
1812 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
1813 * @ia: controlling rpcrdma_ia
1814 * @size: size of buffer to be allocated, in bytes
1815 * @flags: GFP flags
1816 *
1817 * Returns pointer to private header of an area of internally
1818 * registered memory, or an ERR_PTR. The registered buffer follows
1819 * the end of the private header.
1820 *
1821 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
1822 * receiving the payload of RDMA RECV operations. regbufs are not
1823 * used for RDMA READ/WRITE operations, thus are registered only for
1824 * LOCAL access.
1825 */
1826struct rpcrdma_regbuf *
1827rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
1828{
1829 struct rpcrdma_regbuf *rb;
1830 int rc;
1831
1832 rc = -ENOMEM;
1833 rb = kmalloc(sizeof(*rb) + size, flags);
1834 if (rb == NULL)
1835 goto out;
1836
1837 rb->rg_size = size;
1838 rb->rg_owner = NULL;
1839 rc = rpcrdma_register_internal(ia, rb->rg_base, size,
1840 &rb->rg_mr, &rb->rg_iov);
1841 if (rc)
1842 goto out_free;
1843
1844 return rb;
1845
1846out_free:
1847 kfree(rb);
1848out:
1849 return ERR_PTR(rc);
1850}
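
/* A minimal usage sketch, assuming the usual inline buffer sizes from
 * the connection data (error handling abbreviated):
 *
 *	rb = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *	... post rb->rg_iov as the RECV sge; data lands at rb->rg_base ...
 *	rpcrdma_free_regbuf(ia, rb);
 */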
1851
1852/**
1853 * rpcrdma_free_regbuf - deregister and free registered buffer
1854 * @ia: controlling rpcrdma_ia
1855 * @rb: regbuf to be deregistered and freed
1856 */
1857void
1858rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
1859{
1860 if (rb) {
1861 rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov);
1862 kfree(rb);
1863 }
1864}
1865
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001866/*
1867 * Wrappers for chunk registration, shared by read/write chunk code.
1868 */
1869
1870static void
1871rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing)
1872{
1873 seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1874 seg->mr_dmalen = seg->mr_len;
1875 if (seg->mr_page)
1876 seg->mr_dma = ib_dma_map_page(ia->ri_id->device,
1877 seg->mr_page, offset_in_page(seg->mr_offset),
1878 seg->mr_dmalen, seg->mr_dir);
1879 else
1880 seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
1881 seg->mr_offset,
1882 seg->mr_dmalen, seg->mr_dir);
Tom Tucker5c635e02011-02-09 19:45:34 +00001883 if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
1884 dprintk("RPC: %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
1885 __func__,
Randy Dunlap986d4ab2011-03-15 17:11:59 -07001886 (unsigned long long)seg->mr_dma,
1887 seg->mr_offset, seg->mr_dmalen);
Tom Tucker5c635e02011-02-09 19:45:34 +00001888 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04001889}
1890
1891static void
1892rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
1893{
1894 if (seg->mr_page)
1895 ib_dma_unmap_page(ia->ri_id->device,
1896 seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
1897 else
1898 ib_dma_unmap_single(ia->ri_id->device,
1899 seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
1900}
1901
Tom Talpey8d4ba032008-10-09 14:59:49 -04001902static int
Tom Talpey3197d3092008-10-09 15:00:20 -04001903rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
1904 int *nsegs, int writing, struct rpcrdma_ia *ia,
1905 struct rpcrdma_xprt *r_xprt)
1906{
1907 struct rpcrdma_mr_seg *seg1 = seg;
Chuck Lever3eb35812015-01-21 11:02:54 -05001908 struct rpcrdma_mw *mw = seg1->rl_mw;
Chuck Lever0dbb4102014-07-29 17:24:09 -04001909 struct rpcrdma_frmr *frmr = &mw->r.frmr;
1910 struct ib_mr *mr = frmr->fr_mr;
Chuck Leverf590e872014-07-29 17:25:29 -04001911 struct ib_send_wr fastreg_wr, *bad_wr;
Tom Talpey3197d3092008-10-09 15:00:20 -04001912 u8 key;
1913 int len, pageoff;
1914 int i, rc;
Tom Tucker9b781452012-02-20 13:07:57 -06001915 int seg_len;
1916 u64 pa;
1917 int page_no;
Tom Talpey3197d3092008-10-09 15:00:20 -04001918
1919 pageoff = offset_in_page(seg1->mr_offset);
1920 seg1->mr_offset -= pageoff; /* start of page */
1921 seg1->mr_len += pageoff;
1922 len = -pageoff;
Steve Wise0fc6c4e2014-05-28 10:32:00 -04001923 if (*nsegs > ia->ri_max_frmr_depth)
1924 *nsegs = ia->ri_max_frmr_depth;
Tom Tucker9b781452012-02-20 13:07:57 -06001925 for (page_no = i = 0; i < *nsegs;) {
Tom Talpey3197d3092008-10-09 15:00:20 -04001926 rpcrdma_map_one(ia, seg, writing);
Tom Tucker9b781452012-02-20 13:07:57 -06001927 pa = seg->mr_dma;
1928 for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
Chuck Lever0dbb4102014-07-29 17:24:09 -04001929 frmr->fr_pgl->page_list[page_no++] = pa;
Tom Tucker9b781452012-02-20 13:07:57 -06001930 pa += PAGE_SIZE;
1931 }
Tom Talpey3197d3092008-10-09 15:00:20 -04001932 len += seg->mr_len;
1933 ++seg;
1934 ++i;
1935 /* Check for holes */
1936 if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
1937 offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
1938 break;
1939 }
1940 dprintk("RPC: %s: Using frmr %p to map %d segments\n",
Chuck Lever0dbb4102014-07-29 17:24:09 -04001941 __func__, mw, i);
Tom Talpey3197d3092008-10-09 15:00:20 -04001942
Chuck Lever05055722014-07-29 17:25:12 -04001943 frmr->fr_state = FRMR_IS_VALID;
1944
Chuck Leverf590e872014-07-29 17:25:29 -04001945 memset(&fastreg_wr, 0, sizeof(fastreg_wr));
1946 fastreg_wr.wr_id = (unsigned long)(void *)mw;
1947 fastreg_wr.opcode = IB_WR_FAST_REG_MR;
1948 fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma;
1949 fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
1950 fastreg_wr.wr.fast_reg.page_list_len = page_no;
1951 fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
1952 fastreg_wr.wr.fast_reg.length = page_no << PAGE_SHIFT;
1953 if (fastreg_wr.wr.fast_reg.length < len) {
Chuck Lever5fc83f42014-07-29 17:23:17 -04001954 rc = -EIO;
1955 goto out_err;
Chuck Leverc977dea2014-05-28 10:35:06 -04001956 }
1957
1958 /* Bump the key */
Chuck Lever0dbb4102014-07-29 17:24:09 -04001959 key = (u8)(mr->rkey & 0x000000FF);
1960 ib_update_fast_reg_key(mr, ++key);
Chuck Leverc977dea2014-05-28 10:35:06 -04001961
Chuck Leverf590e872014-07-29 17:25:29 -04001962 fastreg_wr.wr.fast_reg.access_flags = (writing ?
Vu Pham68743082009-05-26 14:51:00 -04001963 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
1964 IB_ACCESS_REMOTE_READ);
Chuck Leverf590e872014-07-29 17:25:29 -04001965 fastreg_wr.wr.fast_reg.rkey = mr->rkey;
Tom Talpey3197d3092008-10-09 15:00:20 -04001966 DECR_CQCOUNT(&r_xprt->rx_ep);
1967
Chuck Leverf590e872014-07-29 17:25:29 -04001968 rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
Tom Talpey3197d3092008-10-09 15:00:20 -04001969 if (rc) {
1970 dprintk("RPC: %s: failed ib_post_send for register,"
1971 " status %i\n", __func__, rc);
Chuck Leverc93e9862014-07-29 17:24:19 -04001972 ib_update_fast_reg_key(mr, --key);
Chuck Lever5fc83f42014-07-29 17:23:17 -04001973 goto out_err;
Tom Talpey3197d3092008-10-09 15:00:20 -04001974 } else {
Chuck Lever0dbb4102014-07-29 17:24:09 -04001975 seg1->mr_rkey = mr->rkey;
Tom Talpey3197d3092008-10-09 15:00:20 -04001976 seg1->mr_base = seg1->mr_dma + pageoff;
1977 seg1->mr_nsegs = i;
1978 seg1->mr_len = len;
1979 }
1980 *nsegs = i;
Chuck Lever5fc83f42014-07-29 17:23:17 -04001981 return 0;
1982out_err:
Chuck Lever05055722014-07-29 17:25:12 -04001983 frmr->fr_state = FRMR_IS_INVALID;
Chuck Lever5fc83f42014-07-29 17:23:17 -04001984 while (i--)
1985 rpcrdma_unmap_one(ia, --seg);
Tom Talpey3197d3092008-10-09 15:00:20 -04001986 return rc;
1987}
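
/* Summary of the fast-register flow above: DMA-map each segment and
 * load its pages into frmr->fr_pgl, bump the MR's rkey so this
 * registration carries a fresh key, then post one FAST_REG_MR WR.
 * If the post fails, the key is rolled back and every mapped segment
 * is unmapped before the error is returned.
 */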
1988
1989static int
1990rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
1991 struct rpcrdma_ia *ia, struct rpcrdma_xprt *r_xprt)
1992{
1993 struct rpcrdma_mr_seg *seg1 = seg;
1994 struct ib_send_wr invalidate_wr, *bad_wr;
1995 int rc;
1996
Chuck Lever3eb35812015-01-21 11:02:54 -05001997 seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
Chuck Leverdab7e3b2014-07-29 17:25:20 -04001998
Tom Talpey3197d3092008-10-09 15:00:20 -04001999 memset(&invalidate_wr, 0, sizeof(invalidate_wr));
Chuck Lever3eb35812015-01-21 11:02:54 -05002000 invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
Tom Talpey3197d3092008-10-09 15:00:20 -04002001 invalidate_wr.opcode = IB_WR_LOCAL_INV;
Chuck Lever3eb35812015-01-21 11:02:54 -05002002 invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
Tom Talpey3197d3092008-10-09 15:00:20 -04002003 DECR_CQCOUNT(&r_xprt->rx_ep);
2004
Chuck Lever73806c82014-07-29 17:23:25 -04002005 read_lock(&ia->ri_qplock);
2006 while (seg1->mr_nsegs--)
2007 rpcrdma_unmap_one(ia, seg++);
Tom Talpey3197d3092008-10-09 15:00:20 -04002008 rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
Chuck Lever73806c82014-07-29 17:23:25 -04002009 read_unlock(&ia->ri_qplock);
Chuck Leverdab7e3b2014-07-29 17:25:20 -04002010 if (rc) {
2011 /* Force rpcrdma_buffer_get() to retry */
Chuck Lever3eb35812015-01-21 11:02:54 -05002012 seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
Tom Talpey3197d3092008-10-09 15:00:20 -04002013 dprintk("RPC: %s: failed ib_post_send for invalidate,"
2014 " status %i\n", __func__, rc);
Chuck Leverdab7e3b2014-07-29 17:25:20 -04002015 }
Tom Talpey3197d3092008-10-09 15:00:20 -04002016 return rc;
2017}
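
/* The LOCAL_INV above mirrors the FAST_REG_MR in the register path:
 * it retires the rkey that was advertised to the server. As in
 * rpcrdma_retry_local_inv(), a failed post marks the FRMR stale so
 * rpcrdma_buffer_get() can recover it later.
 */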
2018
2019static int
Tom Talpey8d4ba032008-10-09 14:59:49 -04002020rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg,
2021 int *nsegs, int writing, struct rpcrdma_ia *ia)
2022{
2023 struct rpcrdma_mr_seg *seg1 = seg;
2024 u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
2025 int len, pageoff, i, rc;
2026
2027 pageoff = offset_in_page(seg1->mr_offset);
2028 seg1->mr_offset -= pageoff; /* start of page */
2029 seg1->mr_len += pageoff;
2030 len = -pageoff;
2031 if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
2032 *nsegs = RPCRDMA_MAX_DATA_SEGS;
2033 for (i = 0; i < *nsegs;) {
2034 rpcrdma_map_one(ia, seg, writing);
2035 physaddrs[i] = seg->mr_dma;
2036 len += seg->mr_len;
2037 ++seg;
2038 ++i;
2039 /* Check for holes */
2040 if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
2041 offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
2042 break;
2043 }
Chuck Lever3eb35812015-01-21 11:02:54 -05002044 rc = ib_map_phys_fmr(seg1->rl_mw->r.fmr, physaddrs, i, seg1->mr_dma);
Tom Talpey8d4ba032008-10-09 14:59:49 -04002045 if (rc) {
2046 dprintk("RPC: %s: failed ib_map_phys_fmr "
2047 "%u@0x%llx+%i (%d)... status %i\n", __func__,
2048 len, (unsigned long long)seg1->mr_dma,
2049 pageoff, i, rc);
2050 while (i--)
2051 rpcrdma_unmap_one(ia, --seg);
2052 } else {
Chuck Lever3eb35812015-01-21 11:02:54 -05002053 seg1->mr_rkey = seg1->rl_mw->r.fmr->rkey;
Tom Talpey8d4ba032008-10-09 14:59:49 -04002054 seg1->mr_base = seg1->mr_dma + pageoff;
2055 seg1->mr_nsegs = i;
2056 seg1->mr_len = len;
2057 }
2058 *nsegs = i;
2059 return rc;
2060}
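
/* FMR registration works at page granularity: each segment contributes
 * its starting DMA address to physaddrs[], and one ib_map_phys_fmr()
 * call binds them all to the FMR. The on-stack physaddrs[] array caps
 * a single registration at RPCRDMA_MAX_DATA_SEGS entries.
 */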
2061
2062static int
2063rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
2064 struct rpcrdma_ia *ia)
2065{
2066 struct rpcrdma_mr_seg *seg1 = seg;
2067 LIST_HEAD(l);
2068 int rc;
2069
Chuck Lever3eb35812015-01-21 11:02:54 -05002070 list_add(&seg1->rl_mw->r.fmr->list, &l);
Tom Talpey8d4ba032008-10-09 14:59:49 -04002071 rc = ib_unmap_fmr(&l);
Chuck Lever73806c82014-07-29 17:23:25 -04002072 read_lock(&ia->ri_qplock);
Tom Talpey8d4ba032008-10-09 14:59:49 -04002073 while (seg1->mr_nsegs--)
2074 rpcrdma_unmap_one(ia, seg++);
Chuck Lever73806c82014-07-29 17:23:25 -04002075 read_unlock(&ia->ri_qplock);
Tom Talpey8d4ba032008-10-09 14:59:49 -04002076 if (rc)
2077 dprintk("RPC: %s: failed ib_unmap_fmr,"
2078 " status %i\n", __func__, rc);
2079 return rc;
2080}
2081
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002082int
2083rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
2084 int nsegs, int writing, struct rpcrdma_xprt *r_xprt)
2085{
2086 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002087 int rc = 0;
2088
2089 switch (ia->ri_memreg_strategy) {
2090
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002091 case RPCRDMA_ALLPHYSICAL:
2092 rpcrdma_map_one(ia, seg, writing);
2093 seg->mr_rkey = ia->ri_bind_mem->rkey;
2094 seg->mr_base = seg->mr_dma;
2095 seg->mr_nsegs = 1;
2096 nsegs = 1;
2097 break;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002098
Tom Talpey3197d3092008-10-09 15:00:20 -04002099 /* Registration using frmr registration */
2100 case RPCRDMA_FRMR:
2101 rc = rpcrdma_register_frmr_external(seg, &nsegs, writing, ia, r_xprt);
2102 break;
2103
Tom Talpey8d4ba032008-10-09 14:59:49 -04002104 /* Registration using fmr memory registration */
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002105 case RPCRDMA_MTHCAFMR:
Tom Talpey8d4ba032008-10-09 14:59:49 -04002106 rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002107 break;
2108
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002109 default:
Chuck Lever92b98362014-11-08 20:14:12 -05002110 return -EIO;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002111 }
2112 if (rc)
Chuck Lever92b98362014-11-08 20:14:12 -05002113 return rc;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002114
2115 return nsegs;
2116}
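
/* On success rpcrdma_register_external() returns the number of
 * segments actually registered, which can be fewer than the caller
 * passed in (a hole between segments or a per-strategy segment limit
 * stops the walk early); a negative errno means the registration
 * itself failed.
 */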
2117
2118int
2119rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
Chuck Lever13c9ff82014-05-28 10:33:08 -04002120 struct rpcrdma_xprt *r_xprt)
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002121{
2122 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002123 int nsegs = seg->mr_nsegs, rc;
2124
2125 switch (ia->ri_memreg_strategy) {
2126
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002127 case RPCRDMA_ALLPHYSICAL:
Chuck Lever73806c82014-07-29 17:23:25 -04002128 read_lock(&ia->ri_qplock);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002129 rpcrdma_unmap_one(ia, seg);
Chuck Lever73806c82014-07-29 17:23:25 -04002130 read_unlock(&ia->ri_qplock);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002131 break;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002132
Tom Talpey3197d3092008-10-09 15:00:20 -04002133 case RPCRDMA_FRMR:
2134 rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt);
2135 break;
2136
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002137 case RPCRDMA_MTHCAFMR:
Tom Talpey8d4ba032008-10-09 14:59:49 -04002138 rc = rpcrdma_deregister_fmr_external(seg, ia);
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002139 break;
2140
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002141 default:
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002142 break;
2143 }
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002144 return nsegs;
2145}
2146
2147/*
2148 * Prepost any receive buffer, then post send.
2149 *
2150 * Receive buffer is donated to hardware, reclaimed upon recv completion.
2151 */
2152int
2153rpcrdma_ep_post(struct rpcrdma_ia *ia,
2154 struct rpcrdma_ep *ep,
2155 struct rpcrdma_req *req)
2156{
2157 struct ib_send_wr send_wr, *send_wr_fail;
2158 struct rpcrdma_rep *rep = req->rl_reply;
2159 int rc;
2160
2161 if (rep) {
2162 rc = rpcrdma_ep_post_recv(ia, ep, rep);
2163 if (rc)
2164 goto out;
2165 req->rl_reply = NULL;
2166 }
2167
2168 send_wr.next = NULL;
2169 send_wr.wr_id = 0ULL; /* no send cookie */
2170 send_wr.sg_list = req->rl_send_iov;
2171 send_wr.num_sge = req->rl_niovs;
2172 send_wr.opcode = IB_WR_SEND;
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002173 if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */
2174 ib_dma_sync_single_for_device(ia->ri_id->device,
2175 req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
2176 DMA_TO_DEVICE);
2177 ib_dma_sync_single_for_device(ia->ri_id->device,
2178 req->rl_send_iov[1].addr, req->rl_send_iov[1].length,
2179 DMA_TO_DEVICE);
2180 ib_dma_sync_single_for_device(ia->ri_id->device,
2181 req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
2182 DMA_TO_DEVICE);
2183
2184 if (DECR_CQCOUNT(ep) > 0)
2185 send_wr.send_flags = 0;
2186 else { /* Provider must take a send completion every now and then */
2187 INIT_CQCOUNT(ep);
2188 send_wr.send_flags = IB_SEND_SIGNALED;
2189 }
2190
2191 rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
2192 if (rc)
2193 dprintk("RPC: %s: ib_post_send returned %i\n", __func__,
2194 rc);
2195out:
2196 return rc;
2197}
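
/* Send completions are requested only occasionally: DECR_CQCOUNT()
 * counts down unsignaled sends, and once the budget is spent
 * INIT_CQCOUNT() resets it and that one WR goes out IB_SEND_SIGNALED.
 * Per the comment above, this is just enough signaling to let the
 * provider reap completed send WRs.
 */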
2198
2199/*
2200 * (Re)post a receive buffer.
2201 */
2202int
2203rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
2204 struct rpcrdma_ep *ep,
2205 struct rpcrdma_rep *rep)
2206{
2207 struct ib_recv_wr recv_wr, *recv_wr_fail;
2208 int rc;
2209
2210 recv_wr.next = NULL;
2211 recv_wr.wr_id = (u64) (unsigned long) rep;
2212 recv_wr.sg_list = &rep->rr_iov;
2213 recv_wr.num_sge = 1;
2214
2215 ib_dma_sync_single_for_cpu(ia->ri_id->device,
2216 rep->rr_iov.addr, rep->rr_iov.length, DMA_BIDIRECTIONAL);
2217
\"Talpey, Thomas\c56c65f2007-09-10 13:51:18 -04002218 rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
2219
2220 if (rc)
2221 dprintk("RPC: %s: ib_post_recv returned %i\n", __func__,
2222 rc);
2223 return rc;
2224}
Chuck Lever43e95982014-07-29 17:23:34 -04002225
2226/* Physical mapping means one Read/Write list entry per page.
2227 * All list entries must fit within an inline buffer.
2228 *
2229 * NB: The server must return a Write list for NFS READ,
2230 * which has the same constraint. Factor in the inline
2231 * rsize as well.
2232 */
2233static size_t
2234rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt)
2235{
2236 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
2237 unsigned int inline_size, pages;
2238
2239 inline_size = min_t(unsigned int,
2240 cdata->inline_wsize, cdata->inline_rsize);
2241 inline_size -= RPCRDMA_HDRLEN_MIN;
2242 pages = inline_size / sizeof(struct rpcrdma_segment);
2243 return pages << PAGE_SHIFT;
2244}
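
/* For illustration only (the real constants are in xprt_rdma.h): with
 * 1024-byte inline buffers, a 28-byte minimum RPC/RDMA header, and
 * 16-byte struct rpcrdma_segment entries, this yields
 * (1024 - 28) / 16 = 62 entries, or 62 4KB pages = roughly 248KB of
 * payload per RPC.
 */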
2245
2246static size_t
2247rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt)
2248{
2249 return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
2250}
2251
2252size_t
2253rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt)
2254{
2255 size_t result;
2256
2257 switch (r_xprt->rx_ia.ri_memreg_strategy) {
2258 case RPCRDMA_ALLPHYSICAL:
2259 result = rpcrdma_physical_max_payload(r_xprt);
2260 break;
2261 default:
2262 result = rpcrdma_mr_max_payload(r_xprt);
2263 }
2264 return result;
2265}