/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "rds.h"
#include "ib.h"

static char *rds_ib_event_type_strings[] = {
#define RDS_IB_EVENT_STRING(foo) \
                [IB_EVENT_##foo] = __stringify(IB_EVENT_##foo)
        RDS_IB_EVENT_STRING(CQ_ERR),
        RDS_IB_EVENT_STRING(QP_FATAL),
        RDS_IB_EVENT_STRING(QP_REQ_ERR),
        RDS_IB_EVENT_STRING(QP_ACCESS_ERR),
        RDS_IB_EVENT_STRING(COMM_EST),
        RDS_IB_EVENT_STRING(SQ_DRAINED),
        RDS_IB_EVENT_STRING(PATH_MIG),
        RDS_IB_EVENT_STRING(PATH_MIG_ERR),
        RDS_IB_EVENT_STRING(DEVICE_FATAL),
        RDS_IB_EVENT_STRING(PORT_ACTIVE),
        RDS_IB_EVENT_STRING(PORT_ERR),
        RDS_IB_EVENT_STRING(LID_CHANGE),
        RDS_IB_EVENT_STRING(PKEY_CHANGE),
        RDS_IB_EVENT_STRING(SM_CHANGE),
        RDS_IB_EVENT_STRING(SRQ_ERR),
        RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED),
        RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED),
        RDS_IB_EVENT_STRING(CLIENT_REREGISTER),
#undef RDS_IB_EVENT_STRING
};

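/*
 * rds_str_array() is passed the array size, so event codes without an
 * entry in rds_ib_event_type_strings still map to a safe fallback string
 * instead of reading past the table.
 */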
static char *rds_ib_event_str(enum ib_event_type type)
{
        return rds_str_array(rds_ib_event_type_strings,
                             ARRAY_SIZE(rds_ib_event_type_strings), type);
};

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
        conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        if (rds_ib_sysctl_flow_control && credits != 0) {
                /* We're doing flow control */
                ic->i_flowctl = 1;
                rds_ib_send_add_credits(conn, credits);
        } else {
                ic->i_flowctl = 0;
        }
}

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
        int ret;

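        /*
         * IB_RNR_TIMER_000_32 asks the peer to wait roughly 0.32 ms before
         * retrying after we return an RNR NAK - a short delay, but not the
         * minimum the spec allows (see the comment above).
         */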
        attr->min_rnr_timer = IB_RNR_TIMER_000_32;
        ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
        if (ret)
                printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
        const struct rds_ib_connect_private *dp = NULL;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_qp_attr qp_attr;
        int err;

        if (event->param.conn.private_data_len >= sizeof(*dp)) {
                dp = event->param.conn.private_data;

                /* make sure it isn't empty data */
                if (dp->dp_protocol_major) {
                        rds_ib_set_protocol(conn,
                                RDS_PROTOCOL(dp->dp_protocol_major,
                                dp->dp_protocol_minor));
                        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
                }
        }

        if (conn->c_version < RDS_PROTOCOL(3,1)) {
                printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
                       " no longer supported\n",
                       &conn->c_faddr,
                       RDS_PROTOCOL_MAJOR(conn->c_version),
                       RDS_PROTOCOL_MINOR(conn->c_version));
                rds_conn_destroy(conn);
                return;
        } else {
                printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
                       &conn->c_faddr,
                       RDS_PROTOCOL_MAJOR(conn->c_version),
                       RDS_PROTOCOL_MINOR(conn->c_version),
                       ic->i_flowctl ? ", flow control" : "");
        }

        /*
         * Init rings and fill recv. this needs to wait until protocol negotiation
         * is complete, since ring layout is different from 3.0 to 3.1.
         */
        rds_ib_send_init_ring(ic);
        rds_ib_recv_init_ring(ic);
        /* Post receive buffers - as a side effect, this will update
         * the posted credit count. */
        rds_ib_recv_refill(conn, 1);

        /* Tune RNR behavior */
        rds_ib_tune_rnr(ic, &qp_attr);

        qp_attr.qp_state = IB_QPS_RTS;
        err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
        if (err)
                printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

        /* update ib_device with this local ipaddr */
        err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
        if (err)
                printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
                        err);

        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
        if (dp && dp->dp_ack_seq)
                rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

        rds_connect_complete(conn);
}

static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
                        struct rdma_conn_param *conn_param,
                        struct rds_ib_connect_private *dp,
                        u32 protocol_version,
                        u32 max_responder_resources,
                        u32 max_initiator_depth)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

        memset(conn_param, 0, sizeof(struct rdma_conn_param));

        conn_param->responder_resources =
                min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
        conn_param->initiator_depth =
                min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
        conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
        conn_param->rnr_retry_count = 7;

        if (dp) {
                memset(dp, 0, sizeof(*dp));
                dp->dp_saddr = conn->c_laddr;
                dp->dp_daddr = conn->c_faddr;
                dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
                dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
                dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
                dp->dp_ack_seq = rds_ib_piggyb_ack(ic);

                /* Advertise flow control */
                if (ic->i_flowctl) {
                        unsigned int credits;

                        credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
                        dp->dp_credit = cpu_to_be32(credits);
                        atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
                }

                conn_param->private_data = dp;
                conn_param->private_data_len = sizeof(*dp);
        }
}

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
        rdsdebug("event %u (%s) data %p\n",
                 event->event, rds_ib_event_str(event->event), data);
}

static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
        struct rds_connection *conn = data;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
                 rds_ib_event_str(event->event));

        switch (event->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
                break;
        default:
                rdsdebug("Fatal QP Event %u (%s) "
                        "- connection %pI4->%pI4, reconnecting\n",
                        event->event, rds_ib_event_str(event->event),
                        &conn->c_laddr, &conn->c_faddr);
                rds_conn_drop(conn);
                break;
        }
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_device *dev = ic->i_cm_id->device;
        struct ib_qp_init_attr attr;
        struct rds_ib_device *rds_ibdev;
        int ret;

        /*
         * It's normal to see a null device if an incoming connection races
         * with device removal, so we don't print a warning.
         */
        rds_ibdev = rds_ib_get_client_data(dev);
        if (!rds_ibdev)
                return -EOPNOTSUPP;

        /* add the conn now so that connection establishment has the dev */
        rds_ib_add_conn(rds_ibdev, conn);

        if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
        if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

        /* Protection domain and memory range */
        ic->i_pd = rds_ibdev->pd;
        ic->i_mr = rds_ibdev->mr;

        ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
                                     rds_ib_cq_event_handler, conn,
                                     ic->i_send_ring.w_nr + 1, 0);
        if (IS_ERR(ic->i_send_cq)) {
                ret = PTR_ERR(ic->i_send_cq);
                ic->i_send_cq = NULL;
                rdsdebug("ib_create_cq send failed: %d\n", ret);
                goto out;
        }

        ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
                                     rds_ib_cq_event_handler, conn,
                                     ic->i_recv_ring.w_nr, 0);
        if (IS_ERR(ic->i_recv_cq)) {
                ret = PTR_ERR(ic->i_recv_cq);
                ic->i_recv_cq = NULL;
                rdsdebug("ib_create_cq recv failed: %d\n", ret);
                goto out;
        }

        ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
        if (ret) {
                rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
                goto out;
        }

        ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        if (ret) {
                rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
                goto out;
        }

        /* XXX negotiate max send/recv with remote? */
        memset(&attr, 0, sizeof(attr));
        attr.event_handler = rds_ib_qp_event_handler;
        attr.qp_context = conn;
        /* + 1 to allow for the single ack message */
        attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1;
        attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
        attr.cap.max_send_sge = rds_ibdev->max_sge;
        attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
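        /*
         * IB_SIGNAL_REQ_WR: only work requests that set IB_SEND_SIGNALED
         * generate completions. rds_ib_conn_shutdown() relies on this when
         * it waits on i_signaled_sends rather than draining the whole ring.
         */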
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;
        attr.send_cq = ic->i_send_cq;
        attr.recv_cq = ic->i_recv_cq;

        /*
         * XXX this can fail if max_*_wr is too large? Are we supposed
         * to back off until we get a value that the hardware can support?
         */
        ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
        if (ret) {
                rdsdebug("rdma_create_qp failed: %d\n", ret);
                goto out;
        }

        ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
                                           ic->i_send_ring.w_nr *
                                                sizeof(struct rds_header),
                                           &ic->i_send_hdrs_dma, GFP_KERNEL);
        if (!ic->i_send_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent send failed\n");
                goto out;
        }

        ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
                                           ic->i_recv_ring.w_nr *
                                                sizeof(struct rds_header),
                                           &ic->i_recv_hdrs_dma, GFP_KERNEL);
        if (!ic->i_recv_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent recv failed\n");
                goto out;
        }

        ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
                                       &ic->i_ack_dma, GFP_KERNEL);
        if (!ic->i_ack) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent ack failed\n");
                goto out;
        }

        ic->i_sends = vmalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
                                   ibdev_to_node(dev));
        if (!ic->i_sends) {
                ret = -ENOMEM;
                rdsdebug("send allocation failed\n");
                goto out;
        }
        memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));

        ic->i_recvs = vmalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
                                   ibdev_to_node(dev));
        if (!ic->i_recvs) {
                ret = -ENOMEM;
                rdsdebug("recv allocation failed\n");
                goto out;
        }
        memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));

        rds_ib_recv_init_ack(ic);

        rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
                 ic->i_send_cq, ic->i_recv_cq);

out:
        rds_ib_dev_put(rds_ibdev);
        return ret;
}

static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
        const struct rds_ib_connect_private *dp = event->param.conn.private_data;
        u16 common;
        u32 version = 0;

        /*
         * rdma_cm private data is odd - when there is any private data in the
         * request, we will be given a pretty large buffer without telling us the
         * original size. The only way to tell the difference is by looking at
         * the contents, which are initialized to zero.
         * If the protocol version fields aren't set, this is a connection attempt
         * from an older version. This could be 3.0 or 2.0 - we can't tell.
         * We really should have changed this for OFED 1.3 :-(
         */

        /* Be paranoid. RDS always has privdata */
        if (!event->param.conn.private_data_len) {
                printk(KERN_NOTICE "RDS incoming connection has no private data, "
                        "rejecting\n");
                return 0;
        }

        /* Even if len is crap *now* I still want to check it. -ASG */
        if (event->param.conn.private_data_len < sizeof (*dp) ||
            dp->dp_protocol_major == 0)
                return RDS_PROTOCOL_3_0;

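        /*
         * Both ends advertise the minor versions they speak as a bit mask
         * (bit N = minor N). We negotiate the highest minor common to both
         * sides: e.g. a common mask of 0x3 walks version up from 3.0 to 3.1.
         */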
        common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
        if (dp->dp_protocol_major == 3 && common) {
                version = RDS_PROTOCOL_3_0;
                while ((common >>= 1) != 0)
                        version++;
        } else if (printk_ratelimit()) {
                printk(KERN_NOTICE "RDS: Connection from %pI4 using "
                        "incompatible protocol version %u.%u\n",
                        &dp->dp_saddr,
                        dp->dp_protocol_major,
                        dp->dp_protocol_minor);
        }
        return version;
}

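/*
 * The return value goes back to the rdma_cm core: nonzero (destroy) tells it
 * to tear down the cm_id, which is only safe while we haven't yet taken
 * ownership of it by storing it in ic->i_cm_id.
 */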
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                                    struct rdma_cm_event *event)
{
        __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
        __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
        const struct rds_ib_connect_private *dp = event->param.conn.private_data;
        struct rds_ib_connect_private dp_rep;
        struct rds_connection *conn = NULL;
        struct rds_ib_connection *ic = NULL;
        struct rdma_conn_param conn_param;
        u32 version;
        int err = 1, destroy = 1;

        /* Check whether the remote protocol version matches ours. */
        version = rds_ib_protocol_compatible(event);
        if (!version)
                goto out;

        rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
                 "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
                 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
                 (unsigned long long)be64_to_cpu(lguid),
                 (unsigned long long)be64_to_cpu(fguid));

        conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport,
                               GFP_KERNEL);
        if (IS_ERR(conn)) {
                rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
                conn = NULL;
                goto out;
        }

        /*
         * The connection request may occur while the
         * previous connection exists, e.g. in case of failover.
         * But as connections may be initiated simultaneously
         * by both hosts, we have a random backoff mechanism -
         * see the comment above rds_queue_reconnect()
         */
        mutex_lock(&conn->c_cm_lock);
        if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
                if (rds_conn_state(conn) == RDS_CONN_UP) {
                        rdsdebug("incoming connect while connecting\n");
                        rds_conn_drop(conn);
                        rds_ib_stats_inc(s_ib_listen_closed_stale);
                } else
                if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
                        /* Wait and see - our connect may still be succeeding */
                        rds_ib_stats_inc(s_ib_connect_raced);
                }
                goto out;
        }

        ic = conn->c_transport_data;

        rds_ib_set_protocol(conn, version);
        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
        if (dp->dp_ack_seq)
                rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

        BUG_ON(cm_id->context);
        BUG_ON(ic->i_cm_id);

        ic->i_cm_id = cm_id;
        cm_id->context = conn;

        /* We got halfway through setting up the ib_connection, if we
         * fail now, we have to take the long route out of this mess. */
        destroy = 0;

        err = rds_ib_setup_qp(conn);
        if (err) {
                rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
                mutex_unlock(&conn->c_cm_lock);
                goto out;
        }

        rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
                event->param.conn.responder_resources,
                event->param.conn.initiator_depth);

        /* rdma_accept() calls rdma_reject() internally if it fails */
        err = rdma_accept(cm_id, &conn_param);
        if (err)
                rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);

out:
        if (conn)
                mutex_unlock(&conn->c_cm_lock);
        if (err)
                rdma_reject(cm_id, NULL, 0);
        return destroy;
}


int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
        struct rds_connection *conn = cm_id->context;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rdma_conn_param conn_param;
        struct rds_ib_connect_private dp;
        int ret;

        /* If the peer doesn't do protocol negotiation, we must
         * default to RDSv3.0 */
        rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
        ic->i_flowctl = rds_ib_sysctl_flow_control;     /* advertise flow control */

        ret = rds_ib_setup_qp(conn);
        if (ret) {
                rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
                goto out;
        }

        rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
                                  UINT_MAX, UINT_MAX);
        ret = rdma_connect(cm_id, &conn_param);
        if (ret)
                rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
        /* Beware - returning non-zero tells the rdma_cm to destroy
         * the cm_id. We should certainly not do it as long as we still
         * "own" the cm_id. */
        if (ret) {
                if (ic->i_cm_id == cm_id)
                        ret = 0;
        }
        return ret;
}

int rds_ib_conn_connect(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct sockaddr_in src, dest;
        int ret;

        /* XXX I wonder what effect the port space has */
        /* delegate cm event handler to rdma_transport */
        ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
                                     RDMA_PS_TCP);
        if (IS_ERR(ic->i_cm_id)) {
                ret = PTR_ERR(ic->i_cm_id);
                ic->i_cm_id = NULL;
                rdsdebug("rdma_create_id() failed: %d\n", ret);
                goto out;
        }

        rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

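        /* Bind to any local port; the peer's listener sits on RDS_PORT. */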
        src.sin_family = AF_INET;
        src.sin_addr.s_addr = (__force u32)conn->c_laddr;
        src.sin_port = (__force u16)htons(0);

        dest.sin_family = AF_INET;
        dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
        dest.sin_port = (__force u16)htons(RDS_PORT);

        ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
                                (struct sockaddr *)&dest,
                                RDS_RDMA_RESOLVE_TIMEOUT_MS);
        if (ret) {
                rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
                         ret);
                rdma_destroy_id(ic->i_cm_id);
                ic->i_cm_id = NULL;
        }

out:
        return ret;
}

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup. In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_shutdown(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        int err = 0;

        rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
                 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
                 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

        if (ic->i_cm_id) {
                struct ib_device *dev = ic->i_cm_id->device;

                rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
                err = rdma_disconnect(ic->i_cm_id);
                if (err) {
                        /* Actually this may happen quite frequently, when
                         * an outgoing connect raced with an incoming connect.
                         */
                        rdsdebug("failed to disconnect, cm: %p err %d\n",
                                ic->i_cm_id, err);
                }

                /*
                 * We want to wait for tx and rx completion to finish
                 * before we tear down the connection, but we have to be
                 * careful not to get stuck waiting on a send ring that
                 * only has unsignaled sends in it. We've shutdown new
                 * sends before getting here so by waiting for signaled
                 * sends to complete we're ensured that there will be no
                 * more tx processing.
                 */
                wait_event(rds_ib_ring_empty_wait,
                           rds_ib_ring_empty(&ic->i_recv_ring) &&
                           (atomic_read(&ic->i_signaled_sends) == 0));
                tasklet_kill(&ic->i_recv_tasklet);

                if (ic->i_send_hdrs)
                        ib_dma_free_coherent(dev,
                                           ic->i_send_ring.w_nr *
                                                sizeof(struct rds_header),
                                           ic->i_send_hdrs,
                                           ic->i_send_hdrs_dma);

                if (ic->i_recv_hdrs)
                        ib_dma_free_coherent(dev,
                                           ic->i_recv_ring.w_nr *
                                                sizeof(struct rds_header),
                                           ic->i_recv_hdrs,
                                           ic->i_recv_hdrs_dma);

                if (ic->i_ack)
                        ib_dma_free_coherent(dev, sizeof(struct rds_header),
                                             ic->i_ack, ic->i_ack_dma);

                if (ic->i_sends)
                        rds_ib_send_clear_ring(ic);
                if (ic->i_recvs)
                        rds_ib_recv_clear_ring(ic);

                if (ic->i_cm_id->qp)
                        rdma_destroy_qp(ic->i_cm_id);
                if (ic->i_send_cq)
                        ib_destroy_cq(ic->i_send_cq);
                if (ic->i_recv_cq)
                        ib_destroy_cq(ic->i_recv_cq);
                rdma_destroy_id(ic->i_cm_id);

                /*
                 * Move connection back to the nodev list.
                 */
                if (ic->rds_ibdev)
                        rds_ib_remove_conn(ic->rds_ibdev, conn);

                ic->i_cm_id = NULL;
                ic->i_pd = NULL;
                ic->i_mr = NULL;
                ic->i_send_cq = NULL;
                ic->i_recv_cq = NULL;
                ic->i_send_hdrs = NULL;
                ic->i_recv_hdrs = NULL;
                ic->i_ack = NULL;
        }
        BUG_ON(ic->rds_ibdev);

        /* Clear pending transmit */
        if (ic->i_data_op) {
                struct rds_message *rm;

                rm = container_of(ic->i_data_op, struct rds_message, data);
                rds_message_put(rm);
                ic->i_data_op = NULL;
        }

        /* Clear the ACK state */
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_set(&ic->i_ack_next, 0);
#else
        ic->i_ack_next = 0;
#endif
        ic->i_ack_recv = 0;

        /* Clear flow control state */
        ic->i_flowctl = 0;
        atomic_set(&ic->i_credits, 0);

        rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
        rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

        if (ic->i_ibinc) {
                rds_inc_put(&ic->i_ibinc->ii_inc);
                ic->i_ibinc = NULL;
        }

        vfree(ic->i_sends);
        ic->i_sends = NULL;
        vfree(ic->i_recvs);
        ic->i_recvs = NULL;
}

int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_ib_connection *ic;
        unsigned long flags;
        int ret;

        /* XXX too lazy? */
        ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL);
        if (!ic)
                return -ENOMEM;

        ret = rds_ib_recv_alloc_caches(ic);
        if (ret) {
                kfree(ic);
                return ret;
        }

        INIT_LIST_HEAD(&ic->ib_node);
        tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
                     (unsigned long) ic);
        mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&ic->i_ack_lock);
#endif
        atomic_set(&ic->i_signaled_sends, 0);

        /*
         * rds_ib_conn_shutdown() waits for these to be emptied so they
         * must be initialized before it can be called.
         */
        rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
        rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

        ic->conn = conn;
        conn->c_transport_data = ic;

        spin_lock_irqsave(&ib_nodev_conns_lock, flags);
        list_add_tail(&ic->ib_node, &ib_nodev_conns);
        spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);


        rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
        return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
        struct rds_ib_connection *ic = arg;
        spinlock_t *lock_ptr;

        rdsdebug("ic %p\n", ic);

        /*
         * Conn is either on a dev's list or on the nodev list.
         * A race with shutdown() or connect() would cause problems
         * (since rds_ibdev would change) but that should never happen.
         */
        lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

        spin_lock_irq(lock_ptr);
        list_del(&ic->ib_node);
        spin_unlock_irq(lock_ptr);

        rds_ib_recv_free_caches(ic);

        kfree(ic);
}


/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
        va_list ap;

        rds_conn_drop(conn);

        va_start(ap, fmt);
        vprintk(fmt, ap);
        va_end(ap);
}