/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "ib.h"

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (rds_ib_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_ib_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
	int ret;

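	/* IB_RNR_TIMER_000_32 encodes a 0.32 msec minimum delay before
	 * the peer retries a send that was RNR NAKed (per the IB RNR
	 * timer encoding). */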
	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
	if (ret)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = NULL;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_qp_attr qp_attr;
	int err;

	if (event->param.conn.private_data_len >= sizeof(*dp)) {
		dp = event->param.conn.private_data;

		/* make sure it isn't empty data */
		if (dp->dp_protocol_major) {
			rds_ib_set_protocol(conn,
				RDS_PROTOCOL(dp->dp_protocol_major,
				dp->dp_protocol_minor));
			rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
		}
	}

	if (conn->c_version < RDS_PROTOCOL(3, 1)) {
		printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
		       " no longer supported\n",
		       &conn->c_faddr,
		       RDS_PROTOCOL_MAJOR(conn->c_version),
		       RDS_PROTOCOL_MINOR(conn->c_version));
		rds_conn_destroy(conn);
		return;
	} else {
		printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
		       &conn->c_faddr,
		       RDS_PROTOCOL_MAJOR(conn->c_version),
		       RDS_PROTOCOL_MINOR(conn->c_version),
		       ic->i_flowctl ? ", flow control" : "");
	}

	/*
	 * Init rings and fill recv. This needs to wait until protocol
	 * negotiation is complete, since the ring layout is different
	 * from 3.0 to 3.1.
	 */
	rds_ib_send_init_ring(ic);
	rds_ib_recv_init_ring(ic);
	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_ib_recv_refill(conn, 1, GFP_KERNEL);

	/* Tune RNR behavior */
	rds_ib_tune_rnr(ic, &qp_attr);

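	/* Nudge the QP into the RTS (ready to send) state; a failure
	 * here is only logged, not treated as fatal. */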
	qp_attr.qp_state = IB_QPS_RTS;
	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
	if (err)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

	/* update ib_device with this local ipaddr */
	err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
	if (err)
		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
		       err);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp) {
		/* The start of the dp structure is not guaranteed to be
		 * 8 byte aligned.  Since dp_ack_seq is 64-bit, extended
		 * load operations can be used, so go through
		 * get_unaligned to avoid unaligned access errors.
		 */
		__be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);

		if (dp_ack_seq)
			rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
					    NULL);
	}

	rds_connect_complete(conn);
}

static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
				      struct rdma_conn_param *conn_param,
				      struct rds_ib_connect_private *dp,
				      u32 protocol_version,
				      u32 max_responder_resources,
				      u32 max_initiator_depth)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));

	conn_param->responder_resources =
		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
	conn_param->rnr_retry_count = 7;

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		dp->dp_saddr = conn->c_laddr;
		dp->dp_daddr = conn->c_faddr;
		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
		dp->dp_ack_seq = rds_ib_piggyb_ack(ic);

		/* Advertise flow control */
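		/* i_credits packs send credits together with newly posted
		 * receive buffers; IB_GET_POST_CREDITS extracts the posted
		 * count, which is advertised to the peer and then
		 * subtracted so the same buffers are only advertised once. */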
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
			dp->dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
		}

		conn_param->private_data = dp;
		conn_param->private_data_len = sizeof(*dp);
	}
}

static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u (%s) data %p\n",
		 event->event, ib_event_msg(event->event), data);
}

/* Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs)
{
	int nr, i;
	struct ib_wc *wc;

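	/* Drain the CQ completely, handling completions in batches of up
	 * to RDS_IB_WC_MAX work completions per ib_poll_cq() call. */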
	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			rds_ib_send_cqe_handler(ic, wc);
		}
	}
}

static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);

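	/* Poll, re-arm the CQ, then poll again: a completion that arrives
	 * between the first drain and ib_req_notify_cq() would otherwise
	 * sit in the queue with no event to announce it. */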
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(ic->conn);
}

static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs,
		     struct rds_ib_ack_state *ack_state)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			rds_ib_recv_cqe_handler(ic, wc, ack_state);
		}
	}
}

static void rds_ib_tasklet_fn_recv(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
	struct rds_ib_ack_state state;

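	/* rds_ibdev can be NULL if the underlying IB device was removed
	 * out from under us; drop the connection and let the reconnect
	 * path clean up. */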
	if (!rds_ibdev)
		rds_conn_drop(conn);

	rds_ib_stats_inc(s_ib_tasklet_call);

	memset(&state, 0, sizeof(state));
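	/* Same drain, re-arm, drain pattern as the send tasklet, so that
	 * completions arriving while we re-arm are not missed. */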
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
}

static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
		 ib_event_msg(event->event));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		rdsdebug("Fatal QP Event %u (%s) "
			 "- connection %pI4->%pI4, reconnecting\n",
			 event->event, ib_event_msg(event->event),
			 &conn->c_laddr, &conn->c_faddr);
		rds_conn_drop(conn);
		break;
	}
}

static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_send_tasklet);
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct ib_cq_init_attr cq_attr = {};
	struct rds_ib_device *rds_ibdev;
	int ret;

	/*
	 * It's normal to see a null device if an incoming connection races
	 * with device removal, so we don't print a warning.
	 */
	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;

	/* add the conn now so that connection establishment has the dev */
	rds_ib_add_conn(rds_ibdev, conn);

	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
	if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;

	cq_attr.cqe = ic->i_send_ring.w_nr + 1;

	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto out;
	}

	cq_attr.cqe = ic->i_recv_ring.w_nr;
	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto out;
	}

	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
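	/* IB_SIGNAL_REQ_WR: only work requests that explicitly ask for a
	 * completion are signaled, which is why shutdown waits on the
	 * i_signaled_sends count rather than on the whole send ring. */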
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;

	/*
	 * XXX this can fail if max_*_wr is too large? Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto out;
	}

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_send_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_recv_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
					  &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto out;
	}

	ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
				   ibdev_to_node(dev));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto out;
	}

	ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
				   ibdev_to_node(dev));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto out;
	}

	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
		 ic->i_send_cq, ic->i_recv_cq);

out:
	rds_ib_dev_put(rds_ibdev);
	return ret;
}

static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	u16 common;
	u32 version = 0;

	/*
	 * rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without telling us the
	 * original size. The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-(
	 */

	/* Be paranoid. RDS always has privdata */
	if (!event->param.conn.private_data_len) {
		printk(KERN_NOTICE "RDS incoming connection has no private data, "
		       "rejecting\n");
		return 0;
	}

	/* Even if len is crap *now* I still want to check it. -ASG */
	if (event->param.conn.private_data_len < sizeof(*dp) ||
	    dp->dp_protocol_major == 0)
		return RDS_PROTOCOL_3_0;

	common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
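	/* common has bit N set when both sides support minor version N;
	 * counting up to its highest set bit yields the highest mutually
	 * supported minor version. */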
	if (dp->dp_protocol_major == 3 && common) {
		version = RDS_PROTOCOL_3_0;
		while ((common >>= 1) != 0)
			version++;
	} else
		printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
				   &dp->dp_saddr,
				   dp->dp_protocol_major,
				   dp->dp_protocol_minor);
	return version;
}

int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event)
{
	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
	const struct rds_ib_connect_private *dp = event->param.conn.private_data;
	struct rds_ib_connect_private dp_rep;
	struct rds_connection *conn = NULL;
	struct rds_ib_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	u32 version;
	int err = 1, destroy = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_ib_protocol_compatible(event);
	if (!version)
		goto out;

	rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
		 "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
		 (unsigned long long)be64_to_cpu(lguid),
		 (unsigned long long)be64_to_cpu(fguid));

	/* RDS/IB is not currently netns aware, thus init_net */
	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
			       &rds_ib_transport, GFP_KERNEL);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while the
	 * previous connection exists, e.g. in case of failover.
	 * But as connections may be initiated simultaneously
	 * by both hosts, we have a random backoff mechanism -
	 * see the comment above rds_queue_reconnect()
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
		goto out;
	}

	ic = conn->c_transport_data;

	rds_ib_set_protocol(conn, version);
	rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp->dp_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	/* We got halfway through setting up the ib_connection, if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
				  event->param.conn.responder_resources,
				  event->param.conn.initiator_depth);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	err = rdma_accept(cm_id, &conn_param);
	if (err)
		rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);

out:
	if (conn)
		mutex_unlock(&conn->c_cm_lock);
	if (err)
		rdma_reject(cm_id, NULL, 0);
	return destroy;
}


int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	struct rds_ib_connect_private dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv3.0 */
	rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
	ic->i_flowctl = rds_ib_sysctl_flow_control;	/* advertise flow control */

	ret = rds_ib_setup_qp(conn);
	if (ret) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
				  UINT_MAX, UINT_MAX);
	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do it as long as we still
	 * "own" the cm_id. */
	if (ret) {
		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	return ret;
}

int rds_ib_conn_connect(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct sockaddr_in src, dest;
	int ret;

	/* XXX I wonder what effect the port space has */
	/* delegate cm event handler to rdma_transport */
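	/* RDMA_PS_TCP gives the cm_id TCP-like port semantics, and
	 * IB_QPT_RC asks for a reliable connected queue pair. */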
	ic->i_cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

	src.sin_family = AF_INET;
	src.sin_addr.s_addr = (__force u32)conn->c_laddr;
	src.sin_port = (__force u16)htons(0);

	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
	dest.sin_port = (__force u16)htons(RDS_PORT);

	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup.  In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_shutdown(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	int err = 0;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				 ic->i_cm_id, err);
		}

		/*
		 * We want to wait for tx and rx completion to finish
		 * before we tear down the connection, but we have to be
		 * careful not to get stuck waiting on a send ring that
		 * only has unsignaled sends in it.  We've shutdown new
		 * sends before getting here so by waiting for signaled
		 * sends to complete we're ensured that there will be no
		 * more tx processing.
		 */
		wait_event(rds_ib_ring_empty_wait,
			   rds_ib_ring_empty(&ic->i_recv_ring) &&
			   (atomic_read(&ic->i_signaled_sends) == 0));
		tasklet_kill(&ic->i_send_tasklet);
		tasklet_kill(&ic->i_recv_tasklet);

		/* first destroy the ib state that generates callbacks */
		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq)
			ib_destroy_cq(ic->i_send_cq);
		if (ic->i_recv_cq)
			ib_destroy_cq(ic->i_recv_cq);

		/* then free the resources that ib callbacks use */
		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_send_hdrs,
					     ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_recv_hdrs,
					     ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_ib_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_ib_recv_clear_ring(ic);

		rdma_destroy_id(ic->i_cm_id);

		/*
		 * Move connection back to the nodev list.
		 */
		if (ic->rds_ibdev)
			rds_ib_remove_conn(ic->rds_ibdev, conn);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_ibdev);

	/* Clear pending transmit */
	if (ic->i_data_op) {
		struct rds_message *rm;

		rm = container_of(ic->i_data_op, struct rds_message, data);
		rds_message_put(rm);
		ic->i_data_op = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	if (ic->i_ibinc) {
		rds_inc_put(&ic->i_ibinc->ii_inc);
		ic->i_ibinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
}

int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_ib_connection *ic;
	unsigned long flags;
	int ret;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
	if (!ic)
		return -ENOMEM;

	ret = rds_ib_recv_alloc_caches(ic);
	if (ret) {
		kfree(ic);
		return ret;
	}

	INIT_LIST_HEAD(&ic->ib_node);
	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
		     (unsigned long)ic);
	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
		     (unsigned long)ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif
	atomic_set(&ic->i_signaled_sends, 0);

	/*
	 * rds_ib_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
	list_add_tail(&ic->ib_node, &ib_nodev_conns);
	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);


	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
	struct rds_ib_connection *ic = arg;
	spinlock_t *lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_ibdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);
	spin_unlock_irq(lock_ptr);

	rds_ib_recv_free_caches(ic);

	kfree(ic);
}


/*
 * An error occurred on the connection
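 * (normally reached through the rds_ib_conn_error() wrapper).
 * Drop the connection so it gets torn down and re-established, then
 * log the printk-style reason supplied by the caller.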
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}