/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "ib.h"

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (rds_ib_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_ib_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
	int ret;

	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
	if (ret)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = NULL;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_qp_attr qp_attr;
	int err;

	if (event->param.conn.private_data_len >= sizeof(*dp)) {
		dp = event->param.conn.private_data;

		/* make sure it isn't empty data */
		if (dp->dp_protocol_major) {
			rds_ib_set_protocol(conn,
				RDS_PROTOCOL(dp->dp_protocol_major,
					     dp->dp_protocol_minor));
			rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
		}
	}

	if (conn->c_version < RDS_PROTOCOL(3,1)) {
		printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
		       " no longer supported\n",
		       &conn->c_faddr,
		       RDS_PROTOCOL_MAJOR(conn->c_version),
		       RDS_PROTOCOL_MINOR(conn->c_version));
		rds_conn_destroy(conn);
		return;
	} else {
		printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
		       &conn->c_faddr,
		       RDS_PROTOCOL_MAJOR(conn->c_version),
		       RDS_PROTOCOL_MINOR(conn->c_version),
		       ic->i_flowctl ? ", flow control" : "");
	}

	/*
	 * Init rings and fill recv. This needs to wait until protocol
	 * negotiation is complete, since the ring layout differs between
	 * 3.0 and 3.1.
	 */
	rds_ib_send_init_ring(ic);
	rds_ib_recv_init_ring(ic);
	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_ib_recv_refill(conn, 1, GFP_KERNEL);

	/* Tune RNR behavior */
	rds_ib_tune_rnr(ic, &qp_attr);

	qp_attr.qp_state = IB_QPS_RTS;
	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
	if (err)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

	/* update ib_device with this local ipaddr */
	err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
	if (err)
		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
			err);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp) {
		/* dp structure start is not guaranteed to be 8 bytes aligned.
		 * Since dp_ack_seq is 64-bit extended load operations can be
		 * used so go through get_unaligned to avoid unaligned errors.
		 */
		__be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);

		if (dp_ack_seq)
			rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
					    NULL);
	}

	rds_connect_complete(conn);
}

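/*
 * Fill in the rdma_conn_param used by both rdma_connect() and rdma_accept().
 * When dp is non-NULL, also build our private data: protocol version,
 * supported minor-version mask, a piggybacked ACK and, if enabled, the
 * flow-control credits we are advertising.
 */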
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
			struct rdma_conn_param *conn_param,
			struct rds_ib_connect_private *dp,
			u32 protocol_version,
			u32 max_responder_resources,
			u32 max_initiator_depth)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));

	conn_param->responder_resources =
		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
	conn_param->rnr_retry_count = 7;

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		dp->dp_saddr = conn->c_laddr;
		dp->dp_daddr = conn->c_faddr;
		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
		dp->dp_ack_seq = rds_ib_piggyb_ack(ic);

		/* Advertise flow control */
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
			dp->dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
		}

		conn_param->private_data = dp;
		conn_param->private_data_len = sizeof(*dp);
	}
}

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u (%s) data %p\n",
		 event->event, ib_event_msg(event->event), data);
}

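/*
 * Asynchronous QP events: COMM_EST is forwarded to the rdma_cm via
 * rdma_notify(); anything else is treated as fatal and drops the
 * connection so it can be re-established.
 */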
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
		 ib_event_msg(event->event));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		rdsdebug("Fatal QP Event %u (%s) "
			"- connection %pI4->%pI4, reconnecting\n",
			event->event, ib_event_msg(event->event),
			&conn->c_laddr, &conn->c_faddr);
		rds_conn_drop(conn);
		break;
	}
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct ib_cq_init_attr cq_attr = {};
	struct rds_ib_device *rds_ibdev;
	int ret;

	/*
	 * It's normal to see a null device if an incoming connection races
	 * with device removal, so we don't print a warning.
	 */
	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;

	/* add the conn now so that connection establishment has the dev */
	rds_ib_add_conn(rds_ibdev, conn);

	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
	if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;
	ic->i_mr = rds_ibdev->mr;

	cq_attr.cqe = ic->i_send_ring.w_nr + 1;
	ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto out;
	}

	cq_attr.cqe = ic->i_recv_ring.w_nr;
	ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto out;
	}

	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;

	/*
	 * XXX this can fail if max_*_wr is too large? Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto out;
	}

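	/* Allocate DMA-coherent arrays of rds_header, one entry per send and
	 * recv work request, plus a single header used for explicit ACKs. */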
	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
				       &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto out;
	}

	ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
				   ibdev_to_node(dev));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto out;
	}

	ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
				   ibdev_to_node(dev));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto out;
	}

	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
		 ic->i_send_cq, ic->i_recv_cq);

out:
	rds_ib_dev_put(rds_ibdev);
	return ret;
}

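/*
 * Pick the protocol version to use for an incoming connection request,
 * based on the private data the peer supplied. Returns 0 if the peer is
 * incompatible.
 */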
static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	u16 common;
	u32 version = 0;

	/*
	 * rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without telling us the
	 * original size. The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-(
	 */

	/* Be paranoid. RDS always has privdata */
	if (!event->param.conn.private_data_len) {
		printk(KERN_NOTICE "RDS incoming connection has no private data, "
			"rejecting\n");
		return 0;
	}

	/* Even if len is crap *now* I still want to check it. -ASG */
	if (event->param.conn.private_data_len < sizeof (*dp) ||
	    dp->dp_protocol_major == 0)
		return RDS_PROTOCOL_3_0;

	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
	if (dp->dp_protocol_major == 3 && common) {
		version = RDS_PROTOCOL_3_0;
		while ((common >>= 1) != 0)
			version++;
	} else
		printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
				&dp->dp_saddr,
				dp->dp_protocol_major,
				dp->dp_protocol_minor);
	return version;
}

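/*
 * Handle an incoming connection request delivered by the rdma_cm.
 * The return value tells the rdma_cm whether it should destroy the cm_id;
 * once we have taken ownership of the cm_id we must not ask for that.
 */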
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
				    struct rdma_cm_event *event)
{
	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	struct rds_ib_connect_private dp_rep;
	struct rds_connection *conn = NULL;
	struct rds_ib_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	u32 version;
	int err = 1, destroy = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_ib_protocol_compatible(event);
	if (!version)
		goto out;

	rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
		 "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
		 (unsigned long long)be64_to_cpu(lguid),
		 (unsigned long long)be64_to_cpu(fguid));

	/* RDS/IB is not currently netns aware, thus init_net */
	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
			       &rds_ib_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while the
	 * previous connection exists, e.g. in case of failover.
	 * But as connections may be initiated simultaneously
	 * by both hosts, we have a random backoff mechanism -
	 * see the comment above rds_queue_reconnect()
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
		goto out;
	}

	ic = conn->c_transport_data;

	rds_ib_set_protocol(conn, version);
	rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	/* We got halfway through setting up the ib_connection, if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
		event->param.conn.responder_resources,
		event->param.conn.initiator_depth);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	err = rdma_accept(cm_id, &conn_param);
	if (err)
		rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);

out:
	if (conn)
		mutex_unlock(&conn->c_cm_lock);
	if (err)
		rdma_reject(cm_id, NULL, 0);
	return destroy;
}

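/*
 * Active (outgoing) side of connection setup: build the QP and issue
 * rdma_connect() advertising our protocol version and resource limits.
 */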
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	struct rds_ib_connect_private dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv3.0 */
	rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
	ic->i_flowctl = rds_ib_sysctl_flow_control;	/* advertise flow control */

	ret = rds_ib_setup_qp(conn);
	if (ret) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
		UINT_MAX, UINT_MAX);
	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do it as long as we still
	 * "own" the cm_id. */
	if (ret) {
		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	return ret;
}

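/*
 * Kick off an outgoing connection: create a cm_id and start address
 * resolution. The remaining steps are driven by the rdma_cm event handler.
 */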
int rds_ib_conn_connect(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct sockaddr_in src, dest;
	int ret;

	/* XXX I wonder what effect the port space has */
	/* delegate cm event handler to rdma_transport */
	ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

	src.sin_family = AF_INET;
	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
	src.sin_port = (__force u16)htons(0);

	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
	dest.sin_port = (__force u16)htons(RDS_PORT);

	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup. In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_shutdown(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	int err = 0;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				ic->i_cm_id, err);
		}

		/*
		 * We want to wait for tx and rx completion to finish
		 * before we tear down the connection, but we have to be
		 * careful not to get stuck waiting on a send ring that
		 * only has unsignaled sends in it. We've shutdown new
		 * sends before getting here so by waiting for signaled
		 * sends to complete we're ensured that there will be no
		 * more tx processing.
		 */
		wait_event(rds_ib_ring_empty_wait,
			   rds_ib_ring_empty(&ic->i_recv_ring) &&
			   (atomic_read(&ic->i_signaled_sends) == 0));
		tasklet_kill(&ic->i_recv_tasklet);

		/* first destroy the ib state that generates callbacks */
		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq)
			ib_destroy_cq(ic->i_send_cq);
		if (ic->i_recv_cq)
			ib_destroy_cq(ic->i_recv_cq);

		/* then free the resources that ib callbacks use */
		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   ic->i_send_hdrs,
					   ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   ic->i_recv_hdrs,
					   ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_ib_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_ib_recv_clear_ring(ic);

		rdma_destroy_id(ic->i_cm_id);

		/*
		 * Move connection back to the nodev list.
		 */
		if (ic->rds_ibdev)
			rds_ib_remove_conn(ic->rds_ibdev, conn);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_mr = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_ibdev);

	/* Clear pending transmit */
	if (ic->i_data_op) {
		struct rds_message *rm;

		rm = container_of(ic->i_data_op, struct rds_message, data);
		rds_message_put(rm);
		ic->i_data_op = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	if (ic->i_ibinc) {
		rds_inc_put(&ic->i_ibinc->ii_inc);
		ic->i_ibinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
}

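/*
 * Allocate the per-connection IB state and park it on the nodev list;
 * it moves onto a device's list once a device is chosen during connect.
 */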
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_ib_connection *ic;
	unsigned long flags;
	int ret;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
	if (!ic)
		return -ENOMEM;

	ret = rds_ib_recv_alloc_caches(ic);
	if (ret) {
		kfree(ic);
		return ret;
	}

	INIT_LIST_HEAD(&ic->ib_node);
	tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
		     (unsigned long) ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif
	atomic_set(&ic->i_signaled_sends, 0);

	/*
	 * rds_ib_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
	list_add_tail(&ic->ib_node, &ib_nodev_conns);
	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
	struct rds_ib_connection *ic = arg;
	spinlock_t *lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_ibdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);
	spin_unlock_irq(lock_ptr);

	rds_ib_recv_free_caches(ic);

	kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}