/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

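/*
 * Arm the per-endpoint CM watchdog (ep_timeout_secs). start_ep_timer()
 * takes a reference on the endpoint that is dropped either by
 * stop_ep_timer() or by the timeout handler once the timer fires.
 */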
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

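/*
 * Disarm the watchdog. Returns 0 if this call dropped the reference taken
 * by start_ep_timer(), or nonzero if the timer had already fired (TIMEOUT
 * already set) and the timeout path owns that reference.
 */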
static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

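/*
 * Derive the effective MSS from the MSS index the hardware negotiated:
 * take the MTU for that index, subtract the IPv4/IPv6 and TCP header
 * sizes (and the TCP timestamp option, if negotiated), and clamp the
 * result to a minimum of 128 bytes.
 */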
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
		print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
		iwpm_remove_mapinfo(&ep->com.local_addr,
				    &ep->com.mapped_local_addr);
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}

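/*
 * Route lookup helpers: find_route6()/find_route() resolve the dst entry
 * for the connection 4-tuple and only accept routes whose egress device
 * belongs to this adapter (or is the loopback device).
 */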
static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}

static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n)
		return NULL;
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
	connect_reply_upcall(ep, -EHOSTUNREACH);
	state_set(&ep->com, DEAD);
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}

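/*
 * Send the FW_FLOWC_WR that seeds the firmware with this connection's
 * parameters (PF/VF, TX channel, port, ingress queue, send/receive
 * sequence numbers, send buffer and MSS) before any offloaded TX.
 */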
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * c4iw_form_pm_msg - Form a port mapper message with mapping info
 */
static void c4iw_form_pm_msg(struct c4iw_ep *ep,
			     struct iwpm_sa_data *pm_msg)
{
	memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
}

/*
 * c4iw_form_reg_msg - Form a port mapper message with dev info
 */
static void c4iw_form_reg_msg(struct c4iw_dev *dev,
			      struct iwpm_dev_data *pm_msg)
{
	memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
	memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
	       IWPM_IFNAME_SIZE);
}

static void c4iw_record_pm_msg(struct c4iw_ep *ep,
			       struct iwpm_sa_data *pm_msg)
{
	memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
	       sizeof(ep->com.mapped_remote_addr));
}

static int get_remote_addr(struct c4iw_ep *ep)
{
	int ret;

	print_addr(&ep->com, __func__, "get_remote_addr");

	ret = iwpm_get_remote_info(&ep->com.mapped_local_addr,
				   &ep->com.mapped_remote_addr,
				   &ep->com.remote_addr, RDMA_NL_C4IW);
	if (ret)
		pr_info(MOD "Unable to find remote peer addr info - err %d\n",
			ret);

	return ret;
}

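/*
 * Pick the best aligned MTU index for this connection, accounting for the
 * IPv4/IPv6 + TCP header overhead and the TCP timestamp option when it is
 * in use.
 */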
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ?
				   sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				  sizeof(struct tcphdr) +
				  (use_ts ?
				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}

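/*
 * Issue the active-open request for this endpoint: build a
 * CPL_ACT_OPEN_REQ/CPL_ACT_OPEN_REQ6 (T4) or the T5 variant carrying the
 * opt0/opt2 TCP options and ntuple filter, using the (possibly port
 * mapped) local and remote addresses.
 */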
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct cpl_t5_act_open_req *t5_req;
	struct cpl_act_open_req6 *req6;
	struct cpl_t5_act_open_req6 *t5_req6;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen;
	int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
				sizeof(struct cpl_act_open_req) :
				sizeof(struct cpl_t5_act_open_req);
	int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
				sizeof(struct cpl_act_open_req6) :
				sizeof(struct cpl_t5_act_open_req6);
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.mapped_local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.mapped_remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_remote_addr;
	int win;

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}
	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
		if (ep->com.remote_addr.ss_family == AF_INET) {
			req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			OPCODE_TID(req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			req->local_port = la->sin_port;
			req->peer_port = ra->sin_port;
			req->local_ip = la->sin_addr.s_addr;
			req->peer_ip = ra->sin_addr.s_addr;
			req->opt0 = cpu_to_be64(opt0);
			req->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);

			INIT_TP_WR(req6, 0);
			OPCODE_TID(req6) = cpu_to_be32(
					   MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					   ((ep->rss_qid<<14)|ep->atid)));
			req6->local_port = la6->sin6_port;
			req6->peer_port = ra6->sin6_port;
			req6->local_ip_hi = *((__be64 *)
					     (la6->sin6_addr.s6_addr));
			req6->local_ip_lo = *((__be64 *)
					     (la6->sin6_addr.s6_addr + 8));
			req6->peer_ip_hi = *((__be64 *)
					    (ra6->sin6_addr.s6_addr));
			req6->peer_ip_lo = *((__be64 *)
					    (ra6->sin6_addr.s6_addr + 8));
			req6->opt0 = cpu_to_be64(opt0);
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		}
	} else {
		u32 isn = (prandom_u32() & ~7UL) - 1;

		if (peer2peer)
			isn += 4;

		if (ep->com.remote_addr.ss_family == AF_INET) {
			t5_req = (struct cpl_t5_act_open_req *)
				 skb_put(skb, wrlen);
			INIT_TP_WR(t5_req, 0);
			OPCODE_TID(t5_req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			t5_req->local_port = la->sin_port;
			t5_req->peer_port = ra->sin_port;
			t5_req->local_ip = la->sin_addr.s_addr;
			t5_req->peer_ip = ra->sin_addr.s_addr;
			t5_req->opt0 = cpu_to_be64(opt0);
			t5_req->params = cpu_to_be64(FILTER_TUPLE_V(
						     cxgb4_select_ntuple(
					     ep->com.dev->rdev.lldi.ports[0],
					     ep->l2t)));
			t5_req->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__,
			     be32_to_cpu(t5_req->rsvd));
			t5_req->opt2 = cpu_to_be32(opt2);
		} else {
			t5_req6 = (struct cpl_t5_act_open_req6 *)
				  skb_put(skb, wrlen);
			INIT_TP_WR(t5_req6, 0);
			OPCODE_TID(t5_req6) = cpu_to_be32(
					      MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					      ((ep->rss_qid<<14)|ep->atid)));
			t5_req6->local_port = la6->sin6_port;
			t5_req6->peer_port = ra6->sin6_port;
			t5_req6->local_ip_hi = *((__be64 *)
						(la6->sin6_addr.s6_addr));
			t5_req6->local_ip_lo = *((__be64 *)
						(la6->sin6_addr.s6_addr + 8));
			t5_req6->peer_ip_hi = *((__be64 *)
					       (ra6->sin6_addr.s6_addr));
			t5_req6->peer_ip_lo = *((__be64 *)
					       (ra6->sin6_addr.s6_addr + 8));
			t5_req6->opt0 = cpu_to_be64(opt0);
			t5_req6->params = cpu_to_be64(FILTER_TUPLE_V(
						      cxgb4_select_ntuple(
					      ep->com.dev->rdev.lldi.ports[0],
					      ep->l2t)));
			t5_req6->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__,
			     be32_to_cpu(t5_req6->rsvd));
			t5_req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

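/*
 * Send the MPA start request as immediate data in a FW_OFLD_TX_DATA_WR.
 * For MPA revision 2 the enhanced-RDMA-connection flag is set and the
 * IRD/ORD (and optional RTR type) are carried in mpa_v2_conn_params ahead
 * of any private data.
 */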
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
		     ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

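/*
 * CPL_ACT_ESTABLISH: the offloaded TCP connection is up. Install the
 * hardware tid, record the initial send/receive sequence numbers, free
 * the atid, send the FLOWC and kick off MPA negotiation.
 */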
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);
	mutex_unlock(&ep->com.mutex);
	return 0;
}

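/*
 * Upcall helpers: deliver iw_cm events (close, disconnect, abort,
 * connect reply/request, established) to the iWARP CM client bound to
 * this endpoint.
 */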
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	__state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
	}
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

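/*
 * Return RX credits to the hardware with a CPL_RX_DATA_ACK, adding in any
 * receive-window overage that could not be expressed in the RCV_BUFSIZ
 * field of opt0 at connection setup.
 */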
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
				       RX_DACK_CHANGE_F |
				       RX_DACK_MODE_V(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

#define RELAXED_IRD_NEGOTIATION 1

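/*
 * Process the peer's MPA start response: accumulate it in ep->mpa_pkt,
 * validate the header and private-data length, then negotiate IRD/ORD
 * (relaxed per RELAXED_IRD_NEGOTIATION) and the MPA v2 peer-to-peer RTR
 * type before moving the endpoint to FPDU_MODE.
 */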
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer. If it expired, then
	 * we ignore the MPA reply. process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}
			if (insuff_ird) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}
Steve Wisecfdda9d2010-04-21 15:30:06 -07001460
1461 attrs.mpa_attr = ep->mpa_attr;
1462 attrs.max_ird = ep->ird;
1463 attrs.max_ord = ep->ord;
1464 attrs.llp_stream_handle = ep;
1465 attrs.next_state = C4IW_QP_STATE_RTS;
1466
1467 mask = C4IW_QP_ATTR_NEXT_STATE |
1468 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
1469 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
1470
1471 /* bind QP and TID with INIT_WR */
1472 err = c4iw_modify_qp(ep->com.qp->rhp,
1473 ep->com.qp, mask, &attrs, 1);
1474 if (err)
1475 goto err;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301476
1477 /*
1478 * If responder's RTR requirement did not match with what initiator
1479 * supports, generate TERM message
1480 */
1481 if (rtr_mismatch) {
1482 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
1483 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1484 attrs.ecode = MPA_NOMATCH_RTR;
1485 attrs.next_state = C4IW_QP_STATE_TERMINATE;
Steve Wisecc18b932014-04-24 14:31:53 -05001486 attrs.send_term = 1;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301487 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
Steve Wisecc18b932014-04-24 14:31:53 -05001488 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301489 err = -ENOMEM;
Steve Wisecc18b932014-04-24 14:31:53 -05001490 disconnect = 1;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301491 goto out;
1492 }
1493
1494 /*
 1495	 * Generate a TERM if the initiator's IRD is not sufficient for the
 1496	 * responder-provided ORD. Currently we behave the same way even
 1497	 * when the responder-provided IRD is insufficient for the
 1498	 * initiator's ORD.
1499 */
1500 if (insuff_ird) {
1501 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
1502 __func__);
1503 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1504 attrs.ecode = MPA_INSUFF_IRD;
1505 attrs.next_state = C4IW_QP_STATE_TERMINATE;
Steve Wisecc18b932014-04-24 14:31:53 -05001506 attrs.send_term = 1;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301507 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
Steve Wisecc18b932014-04-24 14:31:53 -05001508 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301509 err = -ENOMEM;
Steve Wisecc18b932014-04-24 14:31:53 -05001510 disconnect = 1;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301511 goto out;
1512 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001513 goto out;
1514err:
Steve Wisec529fb52014-03-21 20:40:37 +05301515 __state_set(&ep->com, ABORTING);
Steve Wiseb21ef162010-06-10 19:02:55 +00001516 send_abort(ep, skb, GFP_KERNEL);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001517out:
1518 connect_reply_upcall(ep, err);
Steve Wisecc18b932014-04-24 14:31:53 -05001519 return disconnect;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001520}
1521
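/*
 * process_mpa_request - handle MPA start request data from the peer.
 *
 * Accumulates the request into ep->mpa_pkt and validates the MPA
 * header, aborting the connection on any protocol violation.  Once the
 * complete request (including private data) has arrived, the endpoint
 * moves to MPA_REQ_RCVD and the connect request upcall is delivered to
 * the listening endpoint's consumer.
 */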
1522static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1523{
1524 struct mpa_message *mpa;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301525 struct mpa_v2_conn_params *mpa_v2_params;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001526 u16 plen;
1527
1528 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1529
Steve Wisecfdda9d2010-04-21 15:30:06 -07001530 /*
1531 * If we get more than the supported amount of private data
1532 * then we must fail this connection.
1533 */
1534 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001535 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001536 abort_connection(ep, skb, GFP_KERNEL);
1537 return;
1538 }
1539
1540 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1541
1542 /*
1543 * Copy the new data into our accumulation buffer.
1544 */
1545 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1546 skb->len);
1547 ep->mpa_pkt_len += skb->len;
1548
1549 /*
1550 * If we don't even have the mpa message, then bail.
 1551	 * We'll continue processing when more data arrives.
1552 */
1553 if (ep->mpa_pkt_len < sizeof(*mpa))
1554 return;
1555
1556 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001557 mpa = (struct mpa_message *) ep->mpa_pkt;
1558
1559 /*
1560 * Validate MPA Header.
1561 */
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301562 if (mpa->revision > mpa_rev) {
1563 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
1564 " Received = %d\n", __func__, mpa_rev, mpa->revision);
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001565 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001566 abort_connection(ep, skb, GFP_KERNEL);
1567 return;
1568 }
1569
1570 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001571 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001572 abort_connection(ep, skb, GFP_KERNEL);
1573 return;
1574 }
1575
1576 plen = ntohs(mpa->private_data_size);
1577
1578 /*
1579 * Fail if there's too much private data.
1580 */
1581 if (plen > MPA_MAX_PRIVATE_DATA) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001582 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001583 abort_connection(ep, skb, GFP_KERNEL);
1584 return;
1585 }
1586
1587 /*
 1588	 * Fail if the data received exceeds the MPA header plus the advertised private data (plen).
1589 */
1590 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001591 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001592 abort_connection(ep, skb, GFP_KERNEL);
1593 return;
1594 }
1595 ep->plen = (u8) plen;
1596
1597 /*
1598 * If we don't have all the pdata yet, then bail.
1599 */
1600 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1601 return;
1602
1603 /*
1604 * If we get here we have accumulated the entire mpa
 1605	 * start request message including private data.
1606 */
1607 ep->mpa_attr.initiator = 0;
1608 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1609 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1610 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301611 ep->mpa_attr.version = mpa->revision;
1612 if (mpa->revision == 1)
1613 ep->tried_with_mpa_v1 = 1;
1614 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1615
1616 if (mpa->revision == 2) {
1617 ep->mpa_attr.enhanced_rdma_conn =
1618 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1619 if (ep->mpa_attr.enhanced_rdma_conn) {
1620 mpa_v2_params = (struct mpa_v2_conn_params *)
1621 (ep->mpa_pkt + sizeof(*mpa));
1622 ep->ird = ntohs(mpa_v2_params->ird) &
1623 MPA_V2_IRD_ORD_MASK;
1624 ep->ord = ntohs(mpa_v2_params->ord) &
1625 MPA_V2_IRD_ORD_MASK;
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05301626 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
1627 ep->ord);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301628 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
1629 if (peer2peer) {
1630 if (ntohs(mpa_v2_params->ord) &
1631 MPA_V2_RDMA_WRITE_RTR)
1632 ep->mpa_attr.p2p_type =
1633 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1634 else if (ntohs(mpa_v2_params->ord) &
1635 MPA_V2_RDMA_READ_RTR)
1636 ep->mpa_attr.p2p_type =
1637 FW_RI_INIT_P2PTYPE_READ_REQ;
1638 }
1639 }
1640 } else if (mpa->revision == 1)
1641 if (peer2peer)
1642 ep->mpa_attr.p2p_type = p2p_type;
1643
Steve Wisecfdda9d2010-04-21 15:30:06 -07001644 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1645 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1646 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1647 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1648 ep->mpa_attr.p2p_type);
1649
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001650 /*
1651 * If the endpoint timer already expired, then we ignore
1652 * the start request. process_timeout() will abort
1653 * the connection.
1654 */
1655 if (!stop_ep_timer(ep)) {
1656 __state_set(&ep->com, MPA_REQ_RCVD);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001657
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001658 /* drive upcall */
Hariprasad Shenai10be6b42014-11-21 09:36:35 -06001659 mutex_lock_nested(&ep->parent_ep->com.mutex,
1660 SINGLE_DEPTH_NESTING);
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001661 if (ep->parent_ep->com.state != DEAD) {
1662 if (connect_request_upcall(ep))
1663 abort_connection(ep, skb, GFP_KERNEL);
1664 } else {
Steve Wisebe13b2d2014-03-21 20:40:33 +05301665 abort_connection(ep, skb, GFP_KERNEL);
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001666 }
1667 mutex_unlock(&ep->parent_ep->com.mutex);
Steve Wisebe13b2d2014-03-21 20:40:33 +05301668 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001669 return;
1670}
1671
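/*
 * rx_data - handle CPL_RX_DATA (streaming-mode ingress data).
 *
 * Returns RX credits to the hardware and dispatches the payload based
 * on the endpoint state: an MPA reply while in MPA_REQ_SENT, an MPA
 * request while in MPA_REQ_WAIT.  Unexpected streaming data in
 * FPDU_MODE moves the QP to TERMINATE and disconnects the endpoint.
 */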
1672static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1673{
1674 struct c4iw_ep *ep;
1675 struct cpl_rx_data *hdr = cplhdr(skb);
1676 unsigned int dlen = ntohs(hdr->len);
1677 unsigned int tid = GET_TID(hdr);
1678 struct tid_info *t = dev->rdev.lldi.tids;
Vipul Pandya793dad92012-12-10 09:30:56 +00001679 __u8 status = hdr->status;
Steve Wisecc18b932014-04-24 14:31:53 -05001680 int disconnect = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001681
1682 ep = lookup_tid(t, tid);
Steve Wise977116c2014-03-21 20:40:36 +05301683 if (!ep)
1684 return 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001685 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1686 skb_pull(skb, sizeof(*hdr));
1687 skb_trim(skb, dlen);
Steve Wisec529fb52014-03-21 20:40:37 +05301688 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001689
Steve Wisecfdda9d2010-04-21 15:30:06 -07001690 /* update RX credits */
1691 update_rx_credits(ep, dlen);
1692
Steve Wisec529fb52014-03-21 20:40:37 +05301693 switch (ep->com.state) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07001694 case MPA_REQ_SENT:
Vipul Pandya55abf8d2013-01-07 13:11:50 +00001695 ep->rcv_seq += dlen;
Steve Wisecc18b932014-04-24 14:31:53 -05001696 disconnect = process_mpa_reply(ep, skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001697 break;
1698 case MPA_REQ_WAIT:
Vipul Pandya55abf8d2013-01-07 13:11:50 +00001699 ep->rcv_seq += dlen;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001700 process_mpa_request(ep, skb);
1701 break;
Vipul Pandya15579672013-01-07 13:11:52 +00001702 case FPDU_MODE: {
1703 struct c4iw_qp_attributes attrs;
1704 BUG_ON(!ep->com.qp);
Vipul Pandyae8e5b922013-01-07 13:11:55 +00001705 if (status)
Vipul Pandya15579672013-01-07 13:11:52 +00001706 pr_err("%s Unexpected streaming data." \
Vipul Pandya04236df2013-01-07 13:11:54 +00001707 " qpid %u ep %p state %d tid %u status %d\n",
1708 __func__, ep->com.qp->wq.sq.qid, ep,
Steve Wisec529fb52014-03-21 20:40:37 +05301709 ep->com.state, ep->hwtid, status);
Steve Wise97d7ec02013-08-06 21:04:34 +05301710 attrs.next_state = C4IW_QP_STATE_TERMINATE;
Vipul Pandya15579672013-01-07 13:11:52 +00001711 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
Steve Wisecc18b932014-04-24 14:31:53 -05001712 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1713 disconnect = 1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001714 break;
1715 }
Vipul Pandya15579672013-01-07 13:11:52 +00001716 default:
1717 break;
1718 }
Steve Wisec529fb52014-03-21 20:40:37 +05301719 mutex_unlock(&ep->com.mutex);
Steve Wisecc18b932014-04-24 14:31:53 -05001720 if (disconnect)
1721 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001722 return 0;
1723}
1724
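/*
 * abort_rpl - handle CPL_ABORT_RPL_RSS, the reply to an abort we sent.
 *
 * In the ABORTING state this wakes any waiter with -ECONNRESET, marks
 * the endpoint DEAD and releases its resources.
 */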
1725static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1726{
1727 struct c4iw_ep *ep;
1728 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001729 int release = 0;
1730 unsigned int tid = GET_TID(rpl);
1731 struct tid_info *t = dev->rdev.lldi.tids;
1732
1733 ep = lookup_tid(t, tid);
Vipul Pandya49840372012-05-18 15:29:29 +05301734 if (!ep) {
1735 printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
1736 return 0;
1737 }
Wei Yongjun92dd6c32012-09-07 06:51:23 +00001738 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
Steve Wise2f5b48c2010-09-10 11:15:36 -05001739 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001740 switch (ep->com.state) {
1741 case ABORTING:
Vipul Pandya91e9c0712013-01-07 13:11:51 +00001742 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001743 __state_set(&ep->com, DEAD);
1744 release = 1;
1745 break;
1746 default:
1747 printk(KERN_ERR "%s ep %p state %d\n",
1748 __func__, ep, ep->com.state);
1749 break;
1750 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05001751 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001752
1753 if (release)
1754 release_ep_resources(ep);
1755 return 0;
1756}
1757
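/*
 * send_fw_act_open_req - retry an active open through the firmware.
 *
 * Builds a FW_OFLD_CONNECTION_WR carrying the 4-tuple, atid and TCB
 * options (opt0/opt2) for the connection.  Used from act_open_rpl()
 * when the CPL active open fails with CPL_ERR_TCAM_FULL and the LLD
 * has enable_fw_ofld_conn set.
 */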
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001758static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1759{
1760 struct sk_buff *skb;
1761 struct fw_ofld_connection_wr *req;
1762 unsigned int mtu_idx;
1763 int wscale;
Vipul Pandya830662f2013-07-04 16:10:47 +05301764 struct sockaddr_in *sin;
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301765 int win;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001766
1767 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1768 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
1769 memset(req, 0, sizeof(*req));
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08001770 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301771 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
Kumar Sanghvi41b4f862013-12-18 16:38:26 +05301772 req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
1773 ep->com.dev->rdev.lldi.ports[0],
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001774 ep->l2t));
Steve Wise9eccfe12014-03-26 17:08:09 -05001775 sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05301776 req->le.lport = sin->sin_port;
1777 req->le.u.ipv4.lip = sin->sin_addr.s_addr;
Steve Wise9eccfe12014-03-26 17:08:09 -05001778 sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05301779 req->le.pport = sin->sin_port;
1780 req->le.u.ipv4.pip = sin->sin_addr.s_addr;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001781 req->tcb.t_state_to_astid =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301782 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
1783 FW_OFLD_CONNECTION_WR_ASTID_V(atid));
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001784 req->tcb.cplrxdataack_cplpassacceptrpl =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05301785 htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001786 req->tcb.tx_max = (__force __be32) jiffies;
Vipul Pandya793dad92012-12-10 09:30:56 +00001787 req->tcb.rcv_adv = htons(1);
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05301788 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
Hariprasad S04524a42014-09-24 03:53:41 +05301789 enable_tcp_timestamps,
1790 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001791 wscale = compute_wscale(rcv_win);
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301792
1793 /*
1794 * Specify the largest window that will fit in opt0. The
1795 * remainder will be specified in the rx_data_ack.
1796 */
1797 win = ep->rcv_win >> 10;
Anish Bhattd7990b02014-11-12 17:15:57 -08001798 if (win > RCV_BUFSIZ_M)
1799 win = RCV_BUFSIZ_M;
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301800
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08001801 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
1802 (nocong ? NO_CONG_F : 0) |
Anish Bhattd7990b02014-11-12 17:15:57 -08001803 KEEP_ALIVE_F |
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08001804 DELACK_F |
Anish Bhattd7990b02014-11-12 17:15:57 -08001805 WND_SCALE_V(wscale) |
1806 MSS_IDX_V(mtu_idx) |
1807 L2T_IDX_V(ep->l2t->idx) |
1808 TX_CHAN_V(ep->tx_chan) |
1809 SMAC_SEL_V(ep->smac_idx) |
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08001810 DSCP_V(ep->tos) |
Anish Bhattd7990b02014-11-12 17:15:57 -08001811 ULP_MODE_V(ULP_MODE_TCPDDP) |
1812 RCV_BUFSIZ_V(win));
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08001813 req->tcb.opt2 = (__force __be32) (PACE_V(1) |
1814 TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
Anish Bhattd7990b02014-11-12 17:15:57 -08001815 RX_CHANNEL_V(0) |
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08001816 CCTRL_ECN_V(enable_ecn) |
Anish Bhattd7990b02014-11-12 17:15:57 -08001817 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001818 if (enable_tcp_timestamps)
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08001819 req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001820 if (enable_tcp_sack)
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08001821 req->tcb.opt2 |= (__force __be32)SACK_EN_F;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001822 if (wscale && enable_tcp_window_scaling)
Anish Bhattd7990b02014-11-12 17:15:57 -08001823 req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
1824 req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
1825 req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
Vipul Pandya793dad92012-12-10 09:30:56 +00001826 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
1827 set_bit(ACT_OFLD_CONN, &ep->com.history);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001828 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1829}
1830
Steve Wisecfdda9d2010-04-21 15:30:06 -07001831/*
1832 * Return whether a failed active open has allocated a TID
1833 */
1834static inline int act_open_has_tid(int status)
1835{
1836 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1837 status != CPL_ERR_ARP_MISS;
1838}
1839
Steve Wise7a2cea22014-03-14 21:52:07 +05301840/* Returns whether a CPL status conveys negative advice. */
1842static int is_neg_adv(unsigned int status)
1843{
1844 return status == CPL_ERR_RTX_NEG_ADVICE ||
1845 status == CPL_ERR_PERSIST_NEG_ADVICE ||
1846 status == CPL_ERR_KEEPALV_NEG_ADVICE;
1847}
1848
Hariprasad Shenaidd92b122014-07-21 20:55:13 +05301849static char *neg_adv_str(unsigned int status)
1850{
1851 switch (status) {
1852 case CPL_ERR_RTX_NEG_ADVICE:
1853 return "Retransmit timeout";
1854 case CPL_ERR_PERSIST_NEG_ADVICE:
1855 return "Persist timeout";
1856 case CPL_ERR_KEEPALV_NEG_ADVICE:
1857 return "Keepalive timeout";
1858 default:
1859 return "Unknown";
1860 }
1861}
1862
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301863static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
1864{
1865 ep->snd_win = snd_win;
1866 ep->rcv_win = rcv_win;
1867 PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
1868}
1869
Vipul Pandya793dad92012-12-10 09:30:56 +00001870#define ACT_OPEN_RETRY_COUNT 2
1871
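/*
 * import_ep - resolve L2T and transmit resources for an endpoint.
 *
 * Looks up the neighbour for the destination (with special handling
 * for loopback devices), allocates an L2T entry, and derives the MTU,
 * TX channel, SMAC index, queue indices and TCP window sizes from the
 * egress port.
 */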
Vipul Pandya830662f2013-07-04 16:10:47 +05301872static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
1873 struct dst_entry *dst, struct c4iw_dev *cdev,
1874 bool clear_mpa_v1)
1875{
1876 struct neighbour *n;
1877 int err, step;
1878 struct net_device *pdev;
1879
1880 n = dst_neigh_lookup(dst, peer_ip);
1881 if (!n)
1882 return -ENODEV;
1883
1884 rcu_read_lock();
1885 err = -ENOMEM;
1886 if (n->dev->flags & IFF_LOOPBACK) {
1887 if (iptype == 4)
1888 pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
1889 else if (IS_ENABLED(CONFIG_IPV6))
1890 for_each_netdev(&init_net, pdev) {
1891 if (ipv6_chk_addr(&init_net,
1892 (struct in6_addr *)peer_ip,
1893 pdev, 1))
1894 break;
1895 }
1896 else
1897 pdev = NULL;
1898
1899 if (!pdev) {
1900 err = -ENODEV;
1901 goto out;
1902 }
1903 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1904 n, pdev, 0);
1905 if (!ep->l2t)
1906 goto out;
1907 ep->mtu = pdev->mtu;
1908 ep->tx_chan = cxgb4_port_chan(pdev);
1909 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1910 step = cdev->rdev.lldi.ntxq /
1911 cdev->rdev.lldi.nchan;
1912 ep->txq_idx = cxgb4_port_idx(pdev) * step;
1913 step = cdev->rdev.lldi.nrxq /
1914 cdev->rdev.lldi.nchan;
1915 ep->ctrlq_idx = cxgb4_port_idx(pdev);
1916 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
1917 cxgb4_port_idx(pdev) * step];
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301918 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
Vipul Pandya830662f2013-07-04 16:10:47 +05301919 dev_put(pdev);
1920 } else {
1921 pdev = get_real_dev(n->dev);
1922 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1923 n, pdev, 0);
1924 if (!ep->l2t)
1925 goto out;
1926 ep->mtu = dst_mtu(dst);
Steve Wise11b8e222014-05-16 12:42:46 -05001927 ep->tx_chan = cxgb4_port_chan(pdev);
1928 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
Vipul Pandya830662f2013-07-04 16:10:47 +05301929 step = cdev->rdev.lldi.ntxq /
1930 cdev->rdev.lldi.nchan;
Steve Wise11b8e222014-05-16 12:42:46 -05001931 ep->txq_idx = cxgb4_port_idx(pdev) * step;
1932 ep->ctrlq_idx = cxgb4_port_idx(pdev);
Vipul Pandya830662f2013-07-04 16:10:47 +05301933 step = cdev->rdev.lldi.nrxq /
1934 cdev->rdev.lldi.nchan;
1935 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
Steve Wise11b8e222014-05-16 12:42:46 -05001936 cxgb4_port_idx(pdev) * step];
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301937 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
Vipul Pandya830662f2013-07-04 16:10:47 +05301938
1939 if (clear_mpa_v1) {
1940 ep->retry_with_mpa_v1 = 0;
1941 ep->tried_with_mpa_v1 = 0;
1942 }
1943 }
1944 err = 0;
1945out:
1946 rcu_read_unlock();
1947
1948 neigh_release(n);
1949
1950 return err;
1951}
1952
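/*
 * c4iw_reconnect - retry an active open on the same cm_id.
 *
 * Allocates a fresh atid, re-resolves the route and L2T entry from the
 * addresses stored in the cm_id, and re-sends the connect request.  If
 * any step fails, the original connect request is failed upwards with
 * -ECONNRESET.
 */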
Vipul Pandya793dad92012-12-10 09:30:56 +00001953static int c4iw_reconnect(struct c4iw_ep *ep)
1954{
1955 int err = 0;
Steve Wise24d44a32013-07-04 16:10:44 +05301956 struct sockaddr_in *laddr = (struct sockaddr_in *)
1957 &ep->com.cm_id->local_addr;
1958 struct sockaddr_in *raddr = (struct sockaddr_in *)
1959 &ep->com.cm_id->remote_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05301960 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
1961 &ep->com.cm_id->local_addr;
1962 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
1963 &ep->com.cm_id->remote_addr;
1964 int iptype;
1965 __u8 *ra;
Vipul Pandya793dad92012-12-10 09:30:56 +00001966
1967 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
1968 init_timer(&ep->timer);
1969
1970 /*
1971 * Allocate an active TID to initiate a TCP connection.
1972 */
1973 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
1974 if (ep->atid == -1) {
1975 pr_err("%s - cannot alloc atid.\n", __func__);
1976 err = -ENOMEM;
1977 goto fail2;
1978 }
1979 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
1980
1981 /* find a route */
Vipul Pandya830662f2013-07-04 16:10:47 +05301982 if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
1983 ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
1984 raddr->sin_addr.s_addr, laddr->sin_port,
1985 raddr->sin_port, 0);
1986 iptype = 4;
1987 ra = (__u8 *)&raddr->sin_addr;
1988 } else {
1989 ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
1990 raddr6->sin6_addr.s6_addr,
1991 laddr6->sin6_port, raddr6->sin6_port, 0,
1992 raddr6->sin6_scope_id);
1993 iptype = 6;
1994 ra = (__u8 *)&raddr6->sin6_addr;
1995 }
1996 if (!ep->dst) {
Vipul Pandya793dad92012-12-10 09:30:56 +00001997 pr_err("%s - cannot find route.\n", __func__);
1998 err = -EHOSTUNREACH;
1999 goto fail3;
2000 }
Vipul Pandya830662f2013-07-04 16:10:47 +05302001 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
2002 if (err) {
Vipul Pandya793dad92012-12-10 09:30:56 +00002003 pr_err("%s - cannot alloc l2e.\n", __func__);
Vipul Pandya793dad92012-12-10 09:30:56 +00002004 goto fail4;
2005 }
2006
2007 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
2008 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
2009 ep->l2t->idx);
2010
2011 state_set(&ep->com, CONNECTING);
2012 ep->tos = 0;
2013
2014 /* send connect request to rnic */
2015 err = send_connect(ep);
2016 if (!err)
2017 goto out;
2018
2019 cxgb4_l2t_release(ep->l2t);
2020fail4:
2021 dst_release(ep->dst);
2022fail3:
2023 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
2024 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2025fail2:
2026 /*
 2027	 * Remember to send a notification to the upper layer.
 2028	 * We are here because the upper layer is not aware that this is
 2029	 * a re-connect attempt, so it is still waiting for the response
 2030	 * to its first connect request.
2031 */
2032 connect_reply_upcall(ep, -ECONNRESET);
2033 c4iw_put_ep(&ep->com);
2034out:
2035 return err;
2036}
2037
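/*
 * act_open_rpl - handle CPL_ACT_OPEN_RPL, the reply to an active open.
 *
 * Negative advice is only logged.  CPL_ERR_TCAM_FULL may be retried
 * through the firmware offload path and CPL_ERR_CONN_EXIST through
 * c4iw_reconnect(); otherwise the failure is reported to the ULP and
 * the endpoint is torn down.
 */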
Steve Wisecfdda9d2010-04-21 15:30:06 -07002038static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2039{
2040 struct c4iw_ep *ep;
2041 struct cpl_act_open_rpl *rpl = cplhdr(skb);
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002042 unsigned int atid = TID_TID_G(AOPEN_ATID_G(
2043 ntohl(rpl->atid_status)));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002044 struct tid_info *t = dev->rdev.lldi.tids;
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002045 int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
Vipul Pandya830662f2013-07-04 16:10:47 +05302046 struct sockaddr_in *la;
2047 struct sockaddr_in *ra;
2048 struct sockaddr_in6 *la6;
2049 struct sockaddr_in6 *ra6;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002050
2051 ep = lookup_atid(t, atid);
Steve Wise9eccfe12014-03-26 17:08:09 -05002052 la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
2053 ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
2054 la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
2055 ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002056
2057 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
2058 status, status2errno(status));
2059
Steve Wise7a2cea22014-03-14 21:52:07 +05302060 if (is_neg_adv(status)) {
Hariprasad Shenaidd92b122014-07-21 20:55:13 +05302061 dev_warn(&dev->rdev.lldi.pdev->dev,
2062 "Connection problems for atid %u status %u (%s)\n",
2063 atid, status, neg_adv_str(status));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002064 return 0;
2065 }
2066
Vipul Pandya793dad92012-12-10 09:30:56 +00002067 set_bit(ACT_OPEN_RPL, &ep->com.history);
2068
Vipul Pandyad716a2a2012-05-18 15:29:31 +05302069 /*
2070 * Log interesting failures.
2071 */
2072 switch (status) {
2073 case CPL_ERR_CONN_RESET:
2074 case CPL_ERR_CONN_TIMEDOUT:
2075 break;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00002076 case CPL_ERR_TCAM_FULL:
Vipul Pandya830662f2013-07-04 16:10:47 +05302077 mutex_lock(&dev->rdev.stats.lock);
Vipul Pandya3b174d92013-03-14 05:09:03 +00002078 dev->rdev.stats.tcam_full++;
Vipul Pandya830662f2013-07-04 16:10:47 +05302079 mutex_unlock(&dev->rdev.stats.lock);
2080 if (ep->com.local_addr.ss_family == AF_INET &&
2081 dev->rdev.lldi.enable_fw_ofld_conn) {
Vipul Pandya793dad92012-12-10 09:30:56 +00002082 send_fw_act_open_req(ep,
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002083 TID_TID_G(AOPEN_ATID_G(
Vipul Pandya793dad92012-12-10 09:30:56 +00002084 ntohl(rpl->atid_status))));
2085 return 0;
2086 }
2087 break;
2088 case CPL_ERR_CONN_EXIST:
2089 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2090 set_bit(ACT_RETRY_INUSE, &ep->com.history);
2091 remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
2092 atid);
2093 cxgb4_free_atid(t, atid);
2094 dst_release(ep->dst);
2095 cxgb4_l2t_release(ep->l2t);
2096 c4iw_reconnect(ep);
2097 return 0;
2098 }
Vipul Pandya5be78ee2012-12-10 09:30:54 +00002099 break;
Vipul Pandyad716a2a2012-05-18 15:29:31 +05302100 default:
Vipul Pandya830662f2013-07-04 16:10:47 +05302101 if (ep->com.local_addr.ss_family == AF_INET) {
2102 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
2103 atid, status, status2errno(status),
2104 &la->sin_addr.s_addr, ntohs(la->sin_port),
2105 &ra->sin_addr.s_addr, ntohs(ra->sin_port));
2106 } else {
2107 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
2108 atid, status, status2errno(status),
2109 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
2110 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
2111 }
Vipul Pandyad716a2a2012-05-18 15:29:31 +05302112 break;
2113 }
2114
Steve Wisecfdda9d2010-04-21 15:30:06 -07002115 connect_reply_upcall(ep, status2errno(status));
2116 state_set(&ep->com, DEAD);
2117
2118 if (status && act_open_has_tid(status))
2119 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
2120
Vipul Pandya793dad92012-12-10 09:30:56 +00002121 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002122 cxgb4_free_atid(t, atid);
2123 dst_release(ep->dst);
2124 cxgb4_l2t_release(ep->l2t);
2125 c4iw_put_ep(&ep->com);
2126
2127 return 0;
2128}
2129
2130static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2131{
2132 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
2133 struct tid_info *t = dev->rdev.lldi.tids;
2134 unsigned int stid = GET_TID(rpl);
2135 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
2136
2137 if (!ep) {
Vipul Pandya1cab7752012-12-10 09:30:55 +00002138 PDBG("%s stid %d lookup failure!\n", __func__, stid);
2139 goto out;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002140 }
2141 PDBG("%s ep %p status %d error %d\n", __func__, ep,
2142 rpl->status, status2errno(rpl->status));
Steve Wised9594d92011-05-09 22:06:22 -07002143 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002144
Vipul Pandya1cab7752012-12-10 09:30:55 +00002145out:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002146 return 0;
2147}
2148
Steve Wisecfdda9d2010-04-21 15:30:06 -07002149static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2150{
2151 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
2152 struct tid_info *t = dev->rdev.lldi.tids;
2153 unsigned int stid = GET_TID(rpl);
2154 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
2155
2156 PDBG("%s ep %p\n", __func__, ep);
Steve Wised9594d92011-05-09 22:06:22 -07002157 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002158 return 0;
2159}
2160
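/*
 * accept_cr - send the CPL_PASS_ACCEPT_RPL that accepts a connection.
 *
 * Builds opt0/opt2 for the new tid: MSS index, window scale, receive
 * window, congestion control and ECN settings.  On T5 a randomized
 * initial send sequence number is also supplied in the extended reply.
 */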
Vipul Pandya830662f2013-07-04 16:10:47 +05302161static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
Steve Wisecfdda9d2010-04-21 15:30:06 -07002162 struct cpl_pass_accept_req *req)
2163{
2164 struct cpl_pass_accept_rpl *rpl;
2165 unsigned int mtu_idx;
2166 u64 opt0;
2167 u32 opt2;
2168 int wscale;
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302169 struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05302170 int win;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002171
2172 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2173 BUG_ON(skb_cloned(skb));
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302174
Steve Wisecfdda9d2010-04-21 15:30:06 -07002175 skb_get(skb);
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302176 rpl = cplhdr(skb);
2177 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
2178 skb_trim(skb, roundup(sizeof(*rpl5), 16));
2179 rpl5 = (void *)rpl;
2180 INIT_TP_WR(rpl5, ep->hwtid);
2181 } else {
2182 skb_trim(skb, sizeof(*rpl));
2183 INIT_TP_WR(rpl, ep->hwtid);
2184 }
2185 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
2186 ep->hwtid));
2187
2188 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
Hariprasad S04524a42014-09-24 03:53:41 +05302189 enable_tcp_timestamps && req->tcpopt.tstamp,
2190 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002191 wscale = compute_wscale(rcv_win);
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05302192
2193 /*
2194 * Specify the largest window that will fit in opt0. The
2195 * remainder will be specified in the rx_data_ack.
2196 */
2197 win = ep->rcv_win >> 10;
Anish Bhattd7990b02014-11-12 17:15:57 -08002198 if (win > RCV_BUFSIZ_M)
2199 win = RCV_BUFSIZ_M;
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002200 opt0 = (nocong ? NO_CONG_F : 0) |
Anish Bhattd7990b02014-11-12 17:15:57 -08002201 KEEP_ALIVE_F |
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002202 DELACK_F |
Anish Bhattd7990b02014-11-12 17:15:57 -08002203 WND_SCALE_V(wscale) |
2204 MSS_IDX_V(mtu_idx) |
2205 L2T_IDX_V(ep->l2t->idx) |
2206 TX_CHAN_V(ep->tx_chan) |
2207 SMAC_SEL_V(ep->smac_idx) |
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002208 DSCP_V(ep->tos >> 2) |
Anish Bhattd7990b02014-11-12 17:15:57 -08002209 ULP_MODE_V(ULP_MODE_TCPDDP) |
2210 RCV_BUFSIZ_V(win);
2211 opt2 = RX_CHANNEL_V(0) |
2212 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002213
2214 if (enable_tcp_timestamps && req->tcpopt.tstamp)
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002215 opt2 |= TSTAMPS_EN_F;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002216 if (enable_tcp_sack && req->tcpopt.sack)
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002217 opt2 |= SACK_EN_F;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002218 if (wscale && enable_tcp_window_scaling)
Anish Bhattd7990b02014-11-12 17:15:57 -08002219 opt2 |= WND_SCALE_EN_F;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00002220 if (enable_ecn) {
2221 const struct tcphdr *tcph;
2222 u32 hlen = ntohl(req->hdr_len);
2223
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05302224 tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
2225 IP_HDR_LEN_G(hlen);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00002226 if (tcph->ece && tcph->cwr)
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002227 opt2 |= CCTRL_ECN_V(1);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00002228 }
Steve Wise92e50112014-04-24 14:31:59 -05002229 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302230 u32 isn = (prandom_u32() & ~7UL) - 1;
Anish Bhattd7990b02014-11-12 17:15:57 -08002231 opt2 |= T5_OPT_2_VALID_F;
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05302232 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
Hariprasad S0b741042015-04-22 01:44:58 +05302233 opt2 |= T5_ISS_F;
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302234 rpl5 = (void *)rpl;
2235 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
2236 if (peer2peer)
2237 isn += 4;
2238 rpl5->iss = cpu_to_be32(isn);
2239 PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
Steve Wise92e50112014-04-24 14:31:59 -05002240 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002241
Steve Wisecfdda9d2010-04-21 15:30:06 -07002242 rpl->opt0 = cpu_to_be64(opt0);
2243 rpl->opt2 = cpu_to_be32(opt2);
Steve Wised4f1a5c2010-07-23 19:12:32 +00002244 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
Steve Wiseb38a0ad2013-08-06 21:04:37 +05302245 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002246 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
2247
2248 return;
2249}
2250
Vipul Pandya830662f2013-07-04 16:10:47 +05302251static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
Steve Wisecfdda9d2010-04-21 15:30:06 -07002252{
Vipul Pandya830662f2013-07-04 16:10:47 +05302253 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002254 BUG_ON(skb_cloned(skb));
2255 skb_trim(skb, sizeof(struct cpl_tid_release));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002256 release_tid(&dev->rdev, hwtid, skb);
2257 return;
2258}
2259
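/*
 * get_4tuple - extract the 4-tuple from a CPL_PASS_ACCEPT_REQ.
 *
 * Parses the Ethernet/IP/TCP headers that follow the CPL to recover
 * the local and peer addresses and ports, and reports whether the
 * incoming request is IPv4 or IPv6.
 */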
Vipul Pandya830662f2013-07-04 16:10:47 +05302260static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
2261 __u8 *local_ip, __u8 *peer_ip,
Steve Wisecfdda9d2010-04-21 15:30:06 -07002262 __be16 *local_port, __be16 *peer_port)
2263{
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05302264 int eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
2265 int ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002266 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
Vipul Pandya830662f2013-07-04 16:10:47 +05302267 struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002268 struct tcphdr *tcp = (struct tcphdr *)
2269 ((u8 *)(req + 1) + eth_len + ip_len);
2270
Vipul Pandya830662f2013-07-04 16:10:47 +05302271 if (ip->version == 4) {
2272 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
2273 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
2274 ntohs(tcp->dest));
2275 *iptype = 4;
2276 memcpy(peer_ip, &ip->saddr, 4);
2277 memcpy(local_ip, &ip->daddr, 4);
2278 } else {
2279 PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
2280 ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
2281 ntohs(tcp->dest));
2282 *iptype = 6;
2283 memcpy(peer_ip, ip6->saddr.s6_addr, 16);
2284 memcpy(local_ip, ip6->daddr.s6_addr, 16);
2285 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002286 *peer_port = tcp->source;
2287 *local_port = tcp->dest;
2288
2289 return;
2290}
2291
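/*
 * pass_accept_req - handle CPL_PASS_ACCEPT_REQ on a listening endpoint.
 *
 * Allocates and initializes a child endpoint: finds a route back to
 * the peer, imports the L2T entry, records the local/remote and mapped
 * addresses, inserts the hardware tid and sends the accept reply.  The
 * connection is rejected if the listener, route, memory or L2T entry
 * is unavailable.
 */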
2292static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2293{
Vipul Pandya793dad92012-12-10 09:30:56 +00002294 struct c4iw_ep *child_ep = NULL, *parent_ep;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002295 struct cpl_pass_accept_req *req = cplhdr(skb);
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002296 unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002297 struct tid_info *t = dev->rdev.lldi.tids;
2298 unsigned int hwtid = GET_TID(req);
2299 struct dst_entry *dst;
Vipul Pandya830662f2013-07-04 16:10:47 +05302300 __u8 local_ip[16], peer_ip[16];
Steve Wisecfdda9d2010-04-21 15:30:06 -07002301 __be16 local_port, peer_port;
David Miller3786cf12011-12-02 16:52:31 +00002302 int err;
Vipul Pandya1cab7752012-12-10 09:30:55 +00002303 u16 peer_mss = ntohs(req->tcpopt.mss);
Vipul Pandya830662f2013-07-04 16:10:47 +05302304 int iptype;
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302305 unsigned short hdrs;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002306
2307 parent_ep = lookup_stid(t, stid);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002308 if (!parent_ep) {
2309 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
2310 goto reject;
2311 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00002312
Steve Wisecfdda9d2010-04-21 15:30:06 -07002313 if (state_read(&parent_ep->com) != LISTEN) {
2314 printk(KERN_ERR "%s - listening ep not in LISTEN\n",
2315 __func__);
2316 goto reject;
2317 }
2318
Vipul Pandya830662f2013-07-04 16:10:47 +05302319 get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);
2320
Steve Wisecfdda9d2010-04-21 15:30:06 -07002321 /* Find output route */
Vipul Pandya830662f2013-07-04 16:10:47 +05302322 if (iptype == 4) {
2323 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
2324 , __func__, parent_ep, hwtid,
2325 local_ip, peer_ip, ntohs(local_port),
2326 ntohs(peer_port), peer_mss);
2327 dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
2328 local_port, peer_port,
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002329 PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
Vipul Pandya830662f2013-07-04 16:10:47 +05302330 } else {
2331 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
2332 , __func__, parent_ep, hwtid,
2333 local_ip, peer_ip, ntohs(local_port),
2334 ntohs(peer_port), peer_mss);
2335 dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002336 PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
Vipul Pandya830662f2013-07-04 16:10:47 +05302337 ((struct sockaddr_in6 *)
2338 &parent_ep->com.local_addr)->sin6_scope_id);
2339 }
2340 if (!dst) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002341 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
2342 __func__);
2343 goto reject;
2344 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002345
2346 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
2347 if (!child_ep) {
2348 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
2349 __func__);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002350 dst_release(dst);
2351 goto reject;
2352 }
David Miller3786cf12011-12-02 16:52:31 +00002353
Vipul Pandya830662f2013-07-04 16:10:47 +05302354 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
David Miller3786cf12011-12-02 16:52:31 +00002355 if (err) {
2356 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
2357 __func__);
2358 dst_release(dst);
2359 kfree(child_ep);
2360 goto reject;
2361 }
2362
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302363 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
2364 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
2365 if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
2366 child_ep->mtu = peer_mss + hdrs;
Vipul Pandya1cab7752012-12-10 09:30:55 +00002367
Steve Wisecfdda9d2010-04-21 15:30:06 -07002368 state_set(&child_ep->com, CONNECTING);
2369 child_ep->com.dev = dev;
2370 child_ep->com.cm_id = NULL;
Steve Wise5b6b8fe2015-04-21 16:28:41 -04002371
2372 /*
 2373	 * The mapped_local and mapped_remote addresses get set up with
 2374	 * the actual 4-tuple. The local address will be based on the
 2375	 * actual local address of the connection, but on the port number
 2376	 * of the parent listening endpoint. The remote address is set
 2377	 * up based on a query to the IWPM since we don't know what it
2378 * originally was before mapping. If no mapping was done, then
2379 * mapped_remote == remote, and mapped_local == local.
2380 */
Vipul Pandya830662f2013-07-04 16:10:47 +05302381 if (iptype == 4) {
2382 struct sockaddr_in *sin = (struct sockaddr_in *)
Steve Wise5b6b8fe2015-04-21 16:28:41 -04002383 &child_ep->com.mapped_local_addr;
2384
Vipul Pandya830662f2013-07-04 16:10:47 +05302385 sin->sin_family = PF_INET;
2386 sin->sin_port = local_port;
2387 sin->sin_addr.s_addr = *(__be32 *)local_ip;
Steve Wise5b6b8fe2015-04-21 16:28:41 -04002388
2389 sin = (struct sockaddr_in *)&child_ep->com.local_addr;
2390 sin->sin_family = PF_INET;
2391 sin->sin_port = ((struct sockaddr_in *)
2392 &parent_ep->com.local_addr)->sin_port;
2393 sin->sin_addr.s_addr = *(__be32 *)local_ip;
2394
2395 sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05302396 sin->sin_family = PF_INET;
2397 sin->sin_port = peer_port;
2398 sin->sin_addr.s_addr = *(__be32 *)peer_ip;
2399 } else {
2400 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
Steve Wise5b6b8fe2015-04-21 16:28:41 -04002401 &child_ep->com.mapped_local_addr;
2402
Vipul Pandya830662f2013-07-04 16:10:47 +05302403 sin6->sin6_family = PF_INET6;
2404 sin6->sin6_port = local_port;
2405 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
Steve Wise5b6b8fe2015-04-21 16:28:41 -04002406
2407 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2408 sin6->sin6_family = PF_INET6;
2409 sin6->sin6_port = ((struct sockaddr_in6 *)
2410 &parent_ep->com.local_addr)->sin6_port;
2411 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2412
2413 sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05302414 sin6->sin6_family = PF_INET6;
2415 sin6->sin6_port = peer_port;
2416 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
2417 }
Steve Wise5b6b8fe2015-04-21 16:28:41 -04002418 memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
2419 sizeof(child_ep->com.remote_addr));
2420 get_remote_addr(child_ep);
2421
Steve Wisecfdda9d2010-04-21 15:30:06 -07002422 c4iw_get_ep(&parent_ep->com);
2423 child_ep->parent_ep = parent_ep;
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08002424 child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002425 child_ep->dst = dst;
2426 child_ep->hwtid = hwtid;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002427
2428 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
David Miller3786cf12011-12-02 16:52:31 +00002429 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002430
2431 init_timer(&child_ep->timer);
2432 cxgb4_insert_tid(t, child_ep, hwtid);
Vipul Pandyab3de6cf2013-01-07 13:11:59 +00002433 insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
Vipul Pandya830662f2013-07-04 16:10:47 +05302434 accept_cr(child_ep, skb, req);
Vipul Pandya793dad92012-12-10 09:30:56 +00002435 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002436 goto out;
2437reject:
Vipul Pandya830662f2013-07-04 16:10:47 +05302438 reject_cr(dev, hwtid, skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002439out:
2440 return 0;
2441}
2442
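/*
 * pass_establish - handle CPL_PASS_ESTABLISH (passive connection up).
 *
 * Records the TCP sequence numbers, sets the emss from the negotiated
 * TCP options, moves the endpoint to MPA_REQ_WAIT, starts the MPA
 * timer and sends the flowc work request while awaiting the peer's
 * MPA start request.
 */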
2443static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
2444{
2445 struct c4iw_ep *ep;
2446 struct cpl_pass_establish *req = cplhdr(skb);
2447 struct tid_info *t = dev->rdev.lldi.tids;
2448 unsigned int tid = GET_TID(req);
2449
2450 ep = lookup_tid(t, tid);
2451 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2452 ep->snd_seq = be32_to_cpu(req->snd_isn);
2453 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2454
Vipul Pandya1cab7752012-12-10 09:30:55 +00002455 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
2456 ntohs(req->tcp_opt));
2457
Steve Wisecfdda9d2010-04-21 15:30:06 -07002458 set_emss(ep, ntohs(req->tcp_opt));
2459
2460 dst_confirm(ep->dst);
2461 state_set(&ep->com, MPA_REQ_WAIT);
2462 start_ep_timer(ep);
2463 send_flowc(ep, skb);
Vipul Pandya793dad92012-12-10 09:30:56 +00002464 set_bit(PASS_ESTAB, &ep->com.history);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002465
2466 return 0;
2467}
2468
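/*
 * peer_close - handle CPL_PEER_CLOSE (the peer has sent a FIN).
 *
 * Drives the endpoint state machine towards CLOSING/MORIBUND/DEAD,
 * moving the QP to CLOSING or IDLE as appropriate, issuing the peer
 * close or close complete upcalls, and disconnecting or releasing
 * resources as required by the current state.
 */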
2469static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2470{
2471 struct cpl_peer_close *hdr = cplhdr(skb);
2472 struct c4iw_ep *ep;
2473 struct c4iw_qp_attributes attrs;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002474 int disconnect = 1;
2475 int release = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002476 struct tid_info *t = dev->rdev.lldi.tids;
2477 unsigned int tid = GET_TID(hdr);
Steve Wise8da7e7a2011-06-14 20:59:27 +00002478 int ret;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002479
2480 ep = lookup_tid(t, tid);
2481 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2482 dst_confirm(ep->dst);
2483
Vipul Pandya793dad92012-12-10 09:30:56 +00002484 set_bit(PEER_CLOSE, &ep->com.history);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002485 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002486 switch (ep->com.state) {
2487 case MPA_REQ_WAIT:
2488 __state_set(&ep->com, CLOSING);
2489 break;
2490 case MPA_REQ_SENT:
2491 __state_set(&ep->com, CLOSING);
2492 connect_reply_upcall(ep, -ECONNRESET);
2493 break;
2494 case MPA_REQ_RCVD:
2495
2496 /*
2497 * We're gonna mark this puppy DEAD, but keep
2498 * the reference on it until the ULP accepts or
2499 * rejects the CR. Also wake up anyone waiting
2500 * in rdma connection migration (see c4iw_accept_cr()).
2501 */
2502 __state_set(&ep->com, CLOSING);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002503 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
Steve Wised9594d92011-05-09 22:06:22 -07002504 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002505 break;
2506 case MPA_REP_SENT:
2507 __state_set(&ep->com, CLOSING);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002508 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
Steve Wised9594d92011-05-09 22:06:22 -07002509 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002510 break;
2511 case FPDU_MODE:
Steve Wiseca5a2202010-07-23 19:12:37 +00002512 start_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002513 __state_set(&ep->com, CLOSING);
Steve Wise30c95c22011-05-09 22:06:22 -07002514 attrs.next_state = C4IW_QP_STATE_CLOSING;
Steve Wise8da7e7a2011-06-14 20:59:27 +00002515 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
Steve Wise30c95c22011-05-09 22:06:22 -07002516 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
Steve Wise8da7e7a2011-06-14 20:59:27 +00002517 if (ret != -ECONNRESET) {
2518 peer_close_upcall(ep);
2519 disconnect = 1;
2520 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002521 break;
2522 case ABORTING:
2523 disconnect = 0;
2524 break;
2525 case CLOSING:
2526 __state_set(&ep->com, MORIBUND);
2527 disconnect = 0;
2528 break;
2529 case MORIBUND:
Steve Wiseb33bd0c2014-04-09 09:38:25 -05002530 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002531 if (ep->com.cm_id && ep->com.qp) {
2532 attrs.next_state = C4IW_QP_STATE_IDLE;
2533 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2534 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2535 }
Steve Wisebe13b2d2014-03-21 20:40:33 +05302536 close_complete_upcall(ep, 0);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002537 __state_set(&ep->com, DEAD);
2538 release = 1;
2539 disconnect = 0;
2540 break;
2541 case DEAD:
2542 disconnect = 0;
2543 break;
2544 default:
2545 BUG_ON(1);
2546 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05002547 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002548 if (disconnect)
2549 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2550 if (release)
2551 release_ep_resources(ep);
2552 return 0;
2553}
2554
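/*
 * peer_abort - handle CPL_ABORT_REQ_RSS from the hardware.
 *
 * Negative advice is logged and ignored.  Otherwise any waiter is
 * woken with -ECONNRESET, the QP is moved to ERROR if needed, an abort
 * reply (no RST) is sent back, and the endpoint is either released or
 * reconnected with MPA v1 when an MPA v2 attempt is being retried.
 */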
Steve Wisecfdda9d2010-04-21 15:30:06 -07002555static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2556{
2557 struct cpl_abort_req_rss *req = cplhdr(skb);
2558 struct c4iw_ep *ep;
2559 struct cpl_abort_rpl *rpl;
2560 struct sk_buff *rpl_skb;
2561 struct c4iw_qp_attributes attrs;
2562 int ret;
2563 int release = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002564 struct tid_info *t = dev->rdev.lldi.tids;
2565 unsigned int tid = GET_TID(req);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002566
2567 ep = lookup_tid(t, tid);
Steve Wise7a2cea22014-03-14 21:52:07 +05302568 if (is_neg_adv(req->status)) {
Hariprasad Shenaidd92b122014-07-21 20:55:13 +05302569 dev_warn(&dev->rdev.lldi.pdev->dev,
2570 "Negative advice on abort - tid %u status %d (%s)\n",
2571 ep->hwtid, req->status, neg_adv_str(req->status));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002572 return 0;
2573 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002574 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
2575 ep->com.state);
Vipul Pandya793dad92012-12-10 09:30:56 +00002576 set_bit(PEER_ABORT, &ep->com.history);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002577
2578 /*
2579 * Wake up any threads in rdma_init() or rdma_fini().
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302580 * However, this is not needed if com state is just
2581 * MPA_REQ_SENT
Steve Wise2f5b48c2010-09-10 11:15:36 -05002582 */
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302583 if (ep->com.state != MPA_REQ_SENT)
2584 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002585
2586 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002587 switch (ep->com.state) {
2588 case CONNECTING:
2589 break;
2590 case MPA_REQ_WAIT:
Steve Wiseb33bd0c2014-04-09 09:38:25 -05002591 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002592 break;
2593 case MPA_REQ_SENT:
Steve Wiseb33bd0c2014-04-09 09:38:25 -05002594 (void)stop_ep_timer(ep);
Vipul Pandyafe7e0a42013-01-07 13:11:57 +00002595 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302596 connect_reply_upcall(ep, -ECONNRESET);
2597 else {
2598 /*
 2599			 * We deliberately don't send a notification upwards because we
 2600			 * want to retry with MPA v1 without the upper layers even
 2601			 * knowing it.
 2602			 *
 2603			 * Do some housekeeping so as to re-initiate the
 2604			 * connection.
2605 */
2606 PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
2607 mpa_rev);
2608 ep->retry_with_mpa_v1 = 1;
2609 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002610 break;
2611 case MPA_REP_SENT:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002612 break;
2613 case MPA_REQ_RCVD:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002614 break;
2615 case MORIBUND:
2616 case CLOSING:
Steve Wiseca5a2202010-07-23 19:12:37 +00002617 stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002618 /*FALLTHROUGH*/
2619 case FPDU_MODE:
2620 if (ep->com.cm_id && ep->com.qp) {
2621 attrs.next_state = C4IW_QP_STATE_ERROR;
2622 ret = c4iw_modify_qp(ep->com.qp->rhp,
2623 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2624 &attrs, 1);
2625 if (ret)
2626 printk(KERN_ERR MOD
2627 "%s - qp <- error failed!\n",
2628 __func__);
2629 }
2630 peer_abort_upcall(ep);
2631 break;
2632 case ABORTING:
2633 break;
2634 case DEAD:
2635 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002636 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002637 return 0;
2638 default:
2639 BUG_ON(1);
2640 break;
2641 }
2642 dst_confirm(ep->dst);
2643 if (ep->com.state != ABORTING) {
2644 __state_set(&ep->com, DEAD);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302645 /* we don't release if we want to retry with mpa_v1 */
2646 if (!ep->retry_with_mpa_v1)
2647 release = 1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002648 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05002649 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002650
2651 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
2652 if (!rpl_skb) {
2653 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
2654 __func__);
2655 release = 1;
2656 goto out;
2657 }
2658 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
2659 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
2660 INIT_TP_WR(rpl, ep->hwtid);
2661 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
2662 rpl->cmd = CPL_ABORT_NO_RST;
2663 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2664out:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002665 if (release)
2666 release_ep_resources(ep);
Vipul Pandyafe7e0a42013-01-07 13:11:57 +00002667 else if (ep->retry_with_mpa_v1) {
2668 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302669 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
2670 dst_release(ep->dst);
2671 cxgb4_l2t_release(ep->l2t);
2672 c4iw_reconnect(ep);
2673 }
2674
Steve Wisecfdda9d2010-04-21 15:30:06 -07002675 return 0;
2676}
2677
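/*
 * close_con_rpl - handle CPL_CLOSE_CON_RPL (our close has completed).
 *
 * Advances CLOSING to MORIBUND, or completes the teardown from
 * MORIBUND by idling the QP, issuing the close complete upcall and
 * releasing the endpoint.
 */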
2678static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2679{
2680 struct c4iw_ep *ep;
2681 struct c4iw_qp_attributes attrs;
2682 struct cpl_close_con_rpl *rpl = cplhdr(skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002683 int release = 0;
2684 struct tid_info *t = dev->rdev.lldi.tids;
2685 unsigned int tid = GET_TID(rpl);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002686
2687 ep = lookup_tid(t, tid);
2688
2689 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2690 BUG_ON(!ep);
2691
2692 /* The cm_id may be null if we failed to connect */
Steve Wise2f5b48c2010-09-10 11:15:36 -05002693 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002694 switch (ep->com.state) {
2695 case CLOSING:
2696 __state_set(&ep->com, MORIBUND);
2697 break;
2698 case MORIBUND:
Steve Wiseb33bd0c2014-04-09 09:38:25 -05002699 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002700 if ((ep->com.cm_id) && (ep->com.qp)) {
2701 attrs.next_state = C4IW_QP_STATE_IDLE;
2702 c4iw_modify_qp(ep->com.qp->rhp,
2703 ep->com.qp,
2704 C4IW_QP_ATTR_NEXT_STATE,
2705 &attrs, 1);
2706 }
Steve Wisebe13b2d2014-03-21 20:40:33 +05302707 close_complete_upcall(ep, 0);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002708 __state_set(&ep->com, DEAD);
2709 release = 1;
2710 break;
2711 case ABORTING:
2712 case DEAD:
2713 break;
2714 default:
2715 BUG_ON(1);
2716 break;
2717 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05002718 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002719 if (release)
2720 release_ep_resources(ep);
2721 return 0;
2722}
2723
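/*
 * CPL_RDMA_TERMINATE: the peer sent an RDMA terminate message.
 * Move the QP to TERMINATE so the terminate is processed; if the
 * endpoint or QP is gone, just log a warning.
 */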
2724static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
2725{
Steve Wise0e42c1f2010-09-10 11:15:09 -05002726 struct cpl_rdma_terminate *rpl = cplhdr(skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002727 struct tid_info *t = dev->rdev.lldi.tids;
Steve Wise0e42c1f2010-09-10 11:15:09 -05002728 unsigned int tid = GET_TID(rpl);
2729 struct c4iw_ep *ep;
2730 struct c4iw_qp_attributes attrs;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002731
2732 ep = lookup_tid(t, tid);
Steve Wise0e42c1f2010-09-10 11:15:09 -05002733 BUG_ON(!ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002734
Steve Wise30c95c22011-05-09 22:06:22 -07002735 if (ep && ep->com.qp) {
Steve Wise0e42c1f2010-09-10 11:15:09 -05002736 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
2737 ep->com.qp->wq.sq.qid);
2738 attrs.next_state = C4IW_QP_STATE_TERMINATE;
2739 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2740 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2741 } else
Steve Wise30c95c22011-05-09 22:06:22 -07002742 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002743
Steve Wisecfdda9d2010-04-21 15:30:06 -07002744 return 0;
2745}
2746
2747/*
2748 * Upcall from the adapter indicating data has been transmitted.
2749 * For us it's just the single MPA request or reply. We can now free
2750 * the skb holding the mpa message.
2751 */
2752static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
2753{
2754 struct c4iw_ep *ep;
2755 struct cpl_fw4_ack *hdr = cplhdr(skb);
2756 u8 credits = hdr->credits;
2757 unsigned int tid = GET_TID(hdr);
2758 struct tid_info *t = dev->rdev.lldi.tids;
2759
2760
2761 ep = lookup_tid(t, tid);
2762 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
2763 if (credits == 0) {
Joe Perchesaa1ad262010-10-25 19:44:22 -07002764 PDBG("%s 0 credit ack ep %p tid %u state %u\n",
2765 __func__, ep, ep->hwtid, state_read(&ep->com));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002766 return 0;
2767 }
2768
2769 dst_confirm(ep->dst);
2770 if (ep->mpa_skb) {
2771 PDBG("%s last streaming msg ack ep %p tid %u state %u "
2772 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
2773 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
2774 kfree_skb(ep->mpa_skb);
2775 ep->mpa_skb = NULL;
2776 }
2777 return 0;
2778}
2779
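/*
 * iw_cm reject of an inbound connect request.  With MPA rev 0 we can
 * only abort the connection; otherwise we send an MPA reject carrying
 * the private data and then start a normal disconnect.
 */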
Steve Wisecfdda9d2010-04-21 15:30:06 -07002780int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2781{
Steve Wisea7db89e2014-03-21 20:40:35 +05302782 int err = 0;
2783 int disconnect = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002784 struct c4iw_ep *ep = to_ep(cm_id);
2785 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2786
Steve Wisea7db89e2014-03-21 20:40:35 +05302787 mutex_lock(&ep->com.mutex);
2788 if (ep->com.state == DEAD) {
2789 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002790 c4iw_put_ep(&ep->com);
2791 return -ECONNRESET;
2792 }
Vipul Pandya793dad92012-12-10 09:30:56 +00002793 set_bit(ULP_REJECT, &ep->com.history);
Steve Wisea7db89e2014-03-21 20:40:35 +05302794 BUG_ON(ep->com.state != MPA_REQ_RCVD);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002795 if (mpa_rev == 0)
2796 abort_connection(ep, NULL, GFP_KERNEL);
2797 else {
2798 err = send_mpa_reject(ep, pdata, pdata_len);
Steve Wisea7db89e2014-03-21 20:40:35 +05302799 disconnect = 1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002800 }
Steve Wisea7db89e2014-03-21 20:40:35 +05302801 mutex_unlock(&ep->com.mutex);
2802 if (disconnect)
2803 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002804 c4iw_put_ep(&ep->com);
2805 return 0;
2806}
2807
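/*
 * iw_cm accept of an inbound connect request: validate and negotiate
 * IRD/ORD (MPA v2 enhanced mode may relax or abort on a mismatch),
 * bind the QP to the endpoint, move it to RTS and send the MPA reply.
 */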
2808int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2809{
2810 int err;
2811 struct c4iw_qp_attributes attrs;
2812 enum c4iw_qp_attr_mask mask;
2813 struct c4iw_ep *ep = to_ep(cm_id);
2814 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2815 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2816
2817 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
Steve Wisea7db89e2014-03-21 20:40:35 +05302818
2819 mutex_lock(&ep->com.mutex);
2820 if (ep->com.state == DEAD) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002821 err = -ECONNRESET;
2822 goto err;
2823 }
2824
Steve Wisea7db89e2014-03-21 20:40:35 +05302825 BUG_ON(ep->com.state != MPA_REQ_RCVD);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002826 BUG_ON(!qp);
2827
Vipul Pandya793dad92012-12-10 09:30:56 +00002828 set_bit(ULP_ACCEPT, &ep->com.history);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05302829 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
2830 (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002831 abort_connection(ep, NULL, GFP_KERNEL);
2832 err = -EINVAL;
2833 goto err;
2834 }
2835
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302836 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2837 if (conn_param->ord > ep->ird) {
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05302838 if (RELAXED_IRD_NEGOTIATION) {
2839 ep->ord = ep->ird;
2840 } else {
2841 ep->ird = conn_param->ird;
2842 ep->ord = conn_param->ord;
2843 send_mpa_reject(ep, conn_param->private_data,
2844 conn_param->private_data_len);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302845 abort_connection(ep, NULL, GFP_KERNEL);
2846 err = -ENOMEM;
2847 goto err;
2848 }
2849 }
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05302850 if (conn_param->ird < ep->ord) {
2851 if (RELAXED_IRD_NEGOTIATION &&
2852 ep->ord <= h->rdev.lldi.max_ordird_qp) {
2853 conn_param->ird = ep->ord;
2854 } else {
2855 abort_connection(ep, NULL, GFP_KERNEL);
2856 err = -ENOMEM;
2857 goto err;
2858 }
2859 }
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302860 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002861 ep->ird = conn_param->ird;
2862 ep->ord = conn_param->ord;
2863
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05302864 if (ep->mpa_attr.version == 1) {
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302865 if (peer2peer && ep->ird == 0)
2866 ep->ird = 1;
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05302867 } else {
2868 if (peer2peer &&
2869 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2870 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
2871 ep->ird = 1;
2872 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002873
2874 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
2875
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302876 cm_id->add_ref(cm_id);
2877 ep->com.cm_id = cm_id;
2878 ep->com.qp = qp;
Vipul Pandya325abea2013-01-07 13:11:53 +00002879 ref_qp(ep);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302880
Steve Wisecfdda9d2010-04-21 15:30:06 -07002881 /* bind QP to EP and move to RTS */
2882 attrs.mpa_attr = ep->mpa_attr;
2883 attrs.max_ird = ep->ird;
2884 attrs.max_ord = ep->ord;
2885 attrs.llp_stream_handle = ep;
2886 attrs.next_state = C4IW_QP_STATE_RTS;
2887
2888 /* bind QP and TID with INIT_WR */
2889 mask = C4IW_QP_ATTR_NEXT_STATE |
2890 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2891 C4IW_QP_ATTR_MPA_ATTR |
2892 C4IW_QP_ATTR_MAX_IRD |
2893 C4IW_QP_ATTR_MAX_ORD;
2894
2895 err = c4iw_modify_qp(ep->com.qp->rhp,
2896 ep->com.qp, mask, &attrs, 1);
2897 if (err)
2898 goto err1;
2899 err = send_mpa_reply(ep, conn_param->private_data,
2900 conn_param->private_data_len);
2901 if (err)
2902 goto err1;
2903
Steve Wisea7db89e2014-03-21 20:40:35 +05302904 __state_set(&ep->com, FPDU_MODE);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002905 established_upcall(ep);
Steve Wisea7db89e2014-03-21 20:40:35 +05302906 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002907 c4iw_put_ep(&ep->com);
2908 return 0;
2909err1:
2910 ep->com.cm_id = NULL;
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05302911 abort_connection(ep, NULL, GFP_KERNEL);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002912 cm_id->rem_ref(cm_id);
2913err:
Steve Wisea7db89e2014-03-21 20:40:35 +05302914 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002915 c4iw_put_ep(&ep->com);
2916 return err;
2917}
2918
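/*
 * Loopback connect to INADDR_ANY: use the primary IPv4 address of
 * port 0 as both the local and the remote address.
 */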
Vipul Pandya830662f2013-07-04 16:10:47 +05302919static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
2920{
2921 struct in_device *ind;
2922 int found = 0;
2923 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
2924 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
2925
2926 ind = in_dev_get(dev->rdev.lldi.ports[0]);
2927 if (!ind)
2928 return -EADDRNOTAVAIL;
2929 for_primary_ifa(ind) {
2930 laddr->sin_addr.s_addr = ifa->ifa_address;
2931 raddr->sin_addr.s_addr = ifa->ifa_address;
2932 found = 1;
2933 break;
2934 }
2935 endfor_ifa(ind);
2936 in_dev_put(ind);
2937 return found ? 0 : -EADDRNOTAVAIL;
2938}
2939
2940static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
2941 unsigned char banned_flags)
2942{
2943 struct inet6_dev *idev;
2944 int err = -EADDRNOTAVAIL;
2945
2946 rcu_read_lock();
2947 idev = __in6_dev_get(dev);
2948 if (idev != NULL) {
2949 struct inet6_ifaddr *ifp;
2950
2951 read_lock_bh(&idev->lock);
2952 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2953 if (ifp->scope == IFA_LINK &&
2954 !(ifp->flags & banned_flags)) {
2955 memcpy(addr, &ifp->addr, 16);
2956 err = 0;
2957 break;
2958 }
2959 }
2960 read_unlock_bh(&idev->lock);
2961 }
2962 rcu_read_unlock();
2963 return err;
2964}
2965
2966static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
2967{
2968 struct in6_addr uninitialized_var(addr);
2969 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
2970 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
2971
2972 if (get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
2973 memcpy(la6->sin6_addr.s6_addr, &addr, 16);
2974 memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
2975 return 0;
2976 }
2977 return -EADDRNOTAVAIL;
2978}
2979
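/*
 * Active open: allocate an endpoint and atid, register with the port
 * mapper, resolve a route and L2T entry, then send the connect
 * request to the hardware.  Errors unwind through the fail labels.
 */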
Steve Wisecfdda9d2010-04-21 15:30:06 -07002980int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2981{
Steve Wisecfdda9d2010-04-21 15:30:06 -07002982 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2983 struct c4iw_ep *ep;
David Miller3786cf12011-12-02 16:52:31 +00002984 int err = 0;
Steve Wise9eccfe12014-03-26 17:08:09 -05002985 struct sockaddr_in *laddr;
2986 struct sockaddr_in *raddr;
2987 struct sockaddr_in6 *laddr6;
2988 struct sockaddr_in6 *raddr6;
2989 struct iwpm_dev_data pm_reg_msg;
2990 struct iwpm_sa_data pm_msg;
Vipul Pandya830662f2013-07-04 16:10:47 +05302991 __u8 *ra;
2992 int iptype;
Steve Wise9eccfe12014-03-26 17:08:09 -05002993 int iwpm_err = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002994
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05302995 if ((conn_param->ord > cur_max_read_depth(dev)) ||
2996 (conn_param->ird > cur_max_read_depth(dev))) {
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07002997 err = -EINVAL;
2998 goto out;
2999 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07003000 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3001 if (!ep) {
3002 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
3003 err = -ENOMEM;
3004 goto out;
3005 }
3006 init_timer(&ep->timer);
3007 ep->plen = conn_param->private_data_len;
3008 if (ep->plen)
3009 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
3010 conn_param->private_data, ep->plen);
3011 ep->ird = conn_param->ird;
3012 ep->ord = conn_param->ord;
3013
3014 if (peer2peer && ep->ord == 0)
3015 ep->ord = 1;
3016
3017 cm_id->add_ref(cm_id);
3018 ep->com.dev = dev;
3019 ep->com.cm_id = cm_id;
3020 ep->com.qp = get_qhp(dev, conn_param->qpn);
Vipul Pandya830662f2013-07-04 16:10:47 +05303021 if (!ep->com.qp) {
3022 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
3023 err = -EINVAL;
Steve Wise9eccfe12014-03-26 17:08:09 -05003024 goto fail1;
Vipul Pandya830662f2013-07-04 16:10:47 +05303025 }
Vipul Pandya325abea2013-01-07 13:11:53 +00003026 ref_qp(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003027 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
3028 ep->com.qp, cm_id);
3029
3030 /*
3031 * Allocate an active TID to initiate a TCP connection.
3032 */
3033 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
3034 if (ep->atid == -1) {
3035 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
3036 err = -ENOMEM;
Steve Wise9eccfe12014-03-26 17:08:09 -05003037 goto fail1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003038 }
Vipul Pandya793dad92012-12-10 09:30:56 +00003039 insert_handle(dev, &dev->atid_idr, ep, ep->atid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003040
Steve Wise9eccfe12014-03-26 17:08:09 -05003041 memcpy(&ep->com.local_addr, &cm_id->local_addr,
3042 sizeof(ep->com.local_addr));
3043 memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
3044 sizeof(ep->com.remote_addr));
3045
3046 /* No port mapper available, go with the specified peer information */
3047 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
3048 sizeof(ep->com.mapped_local_addr));
3049 memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
3050 sizeof(ep->com.mapped_remote_addr));
3051
3052 c4iw_form_reg_msg(dev, &pm_reg_msg);
3053 iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
3054 if (iwpm_err) {
3055 PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
3056 __func__, iwpm_err);
3057 }
3058 if (iwpm_valid_pid() && !iwpm_err) {
3059 c4iw_form_pm_msg(ep, &pm_msg);
3060 iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
3061 if (iwpm_err)
3062 PDBG("%s: Port Mapper query fail (err = %d).\n",
3063 __func__, iwpm_err);
3064 else
3065 c4iw_record_pm_msg(ep, &pm_msg);
3066 }
3067 if (iwpm_create_mapinfo(&ep->com.local_addr,
3068 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
3069 iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
3070 err = -ENOMEM;
3071 goto fail1;
3072 }
3073 print_addr(&ep->com, __func__, "add_query/create_mapinfo");
3074 set_bit(RELEASE_MAPINFO, &ep->com.flags);
3075
3076 laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
3077 raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
3078 laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
3079 raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr;
3080
Vipul Pandya830662f2013-07-04 16:10:47 +05303081 if (cm_id->remote_addr.ss_family == AF_INET) {
3082 iptype = 4;
3083 ra = (__u8 *)&raddr->sin_addr;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003084
Vipul Pandya830662f2013-07-04 16:10:47 +05303085 /*
3086 * Handle loopback requests to INADDR_ANY.
3087 */
3088 if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
3089 err = pick_local_ipaddrs(dev, cm_id);
3090 if (err)
Steve Wise9eccfe12014-03-26 17:08:09 -05003091 goto fail1;
Vipul Pandya830662f2013-07-04 16:10:47 +05303092 }
3093
3094 /* find a route */
3095 PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
3096 __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
3097 ra, ntohs(raddr->sin_port));
3098 ep->dst = find_route(dev, laddr->sin_addr.s_addr,
3099 raddr->sin_addr.s_addr, laddr->sin_port,
3100 raddr->sin_port, 0);
3101 } else {
3102 iptype = 6;
3103 ra = (__u8 *)&raddr6->sin6_addr;
3104
3105 /*
3106 * Handle loopback requests to INADDR_ANY.
3107 */
3108 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
3109 err = pick_local_ip6addrs(dev, cm_id);
3110 if (err)
Steve Wise9eccfe12014-03-26 17:08:09 -05003111 goto fail1;
Vipul Pandya830662f2013-07-04 16:10:47 +05303112 }
3113
3114 /* find a route */
3115 PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
3116 __func__, laddr6->sin6_addr.s6_addr,
3117 ntohs(laddr6->sin6_port),
3118 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
3119 ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
3120 raddr6->sin6_addr.s6_addr,
3121 laddr6->sin6_port, raddr6->sin6_port, 0,
3122 raddr6->sin6_scope_id);
3123 }
3124 if (!ep->dst) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07003125 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
3126 err = -EHOSTUNREACH;
Steve Wise9eccfe12014-03-26 17:08:09 -05003127 goto fail2;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003128 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07003129
Vipul Pandya830662f2013-07-04 16:10:47 +05303130 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
David Miller3786cf12011-12-02 16:52:31 +00003131 if (err) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07003132 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
Steve Wise9eccfe12014-03-26 17:08:09 -05003133 goto fail3;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003134 }
3135
3136 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
3137 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
3138 ep->l2t->idx);
3139
3140 state_set(&ep->com, CONNECTING);
3141 ep->tos = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003142
3143 /* send connect request to rnic */
3144 err = send_connect(ep);
3145 if (!err)
3146 goto out;
3147
3148 cxgb4_l2t_release(ep->l2t);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003149fail3:
Steve Wise9eccfe12014-03-26 17:08:09 -05003150 dst_release(ep->dst);
3151fail2:
Vipul Pandya793dad92012-12-10 09:30:56 +00003152 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003153 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
Steve Wise9eccfe12014-03-26 17:08:09 -05003154fail1:
Steve Wisecfdda9d2010-04-21 15:30:06 -07003155 cm_id->rem_ref(cm_id);
3156 c4iw_put_ep(&ep->com);
3157out:
3158 return err;
3159}
3160
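/*
 * Program an IPv6 listening server in hardware and wait for the
 * firmware reply.
 */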
Vipul Pandya830662f2013-07-04 16:10:47 +05303161static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3162{
3163 int err;
Steve Wise9eccfe12014-03-26 17:08:09 -05003164 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
3165 &ep->com.mapped_local_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05303166
3167 c4iw_init_wr_wait(&ep->com.wr_wait);
3168 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
3169 ep->stid, &sin6->sin6_addr,
3170 sin6->sin6_port,
3171 ep->com.dev->rdev.lldi.rxq_ids[0]);
3172 if (!err)
3173 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3174 &ep->com.wr_wait,
3175 0, 0, __func__);
Hariprasad Se6b11162014-12-08 15:02:47 +05303176 else if (err > 0)
3177 err = net_xmit_errno(err);
Vipul Pandya830662f2013-07-04 16:10:47 +05303178 if (err)
3179 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
3180 err, ep->stid,
3181 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
3182 return err;
3183}
3184
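/*
 * Program an IPv4 listening server in hardware.  When firmware
 * offloaded connections are enabled we install a server filter
 * instead, retrying while the hardware reports -EBUSY.
 */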
3185static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3186{
3187 int err;
Steve Wise9eccfe12014-03-26 17:08:09 -05003188 struct sockaddr_in *sin = (struct sockaddr_in *)
3189 &ep->com.mapped_local_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05303190
3191 if (dev->rdev.lldi.enable_fw_ofld_conn) {
3192 do {
3193 err = cxgb4_create_server_filter(
3194 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3195 sin->sin_addr.s_addr, sin->sin_port, 0,
3196 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
3197 if (err == -EBUSY) {
3198 set_current_state(TASK_UNINTERRUPTIBLE);
3199 schedule_timeout(usecs_to_jiffies(100));
3200 }
3201 } while (err == -EBUSY);
3202 } else {
3203 c4iw_init_wr_wait(&ep->com.wr_wait);
3204 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
3205 ep->stid, sin->sin_addr.s_addr, sin->sin_port,
3206 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
3207 if (!err)
3208 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3209 &ep->com.wr_wait,
3210 0, 0, __func__);
Hariprasad Se6b11162014-12-08 15:02:47 +05303211 else if (err > 0)
3212 err = net_xmit_errno(err);
Vipul Pandya830662f2013-07-04 16:10:47 +05303213 }
3214 if (err)
3215 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
3216 , err, ep->stid,
3217 &sin->sin_addr, ntohs(sin->sin_port));
3218 return err;
3219}
3220
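/*
 * iw_cm listen: allocate a listening endpoint and server TID, set up
 * the port-mapper mapping for the local address, then create the
 * IPv4 or IPv6 server in hardware.
 */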
Steve Wisecfdda9d2010-04-21 15:30:06 -07003221int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3222{
3223 int err = 0;
3224 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3225 struct c4iw_listen_ep *ep;
Steve Wise9eccfe12014-03-26 17:08:09 -05003226 struct iwpm_dev_data pm_reg_msg;
3227 struct iwpm_sa_data pm_msg;
3228 int iwpm_err = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003229
Steve Wisecfdda9d2010-04-21 15:30:06 -07003230 might_sleep();
3231
3232 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3233 if (!ep) {
3234 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
3235 err = -ENOMEM;
3236 goto fail1;
3237 }
3238 PDBG("%s ep %p\n", __func__, ep);
3239 cm_id->add_ref(cm_id);
3240 ep->com.cm_id = cm_id;
3241 ep->com.dev = dev;
3242 ep->backlog = backlog;
Steve Wise24d44a32013-07-04 16:10:44 +05303243 memcpy(&ep->com.local_addr, &cm_id->local_addr,
3244 sizeof(ep->com.local_addr));
Steve Wisecfdda9d2010-04-21 15:30:06 -07003245
3246 /*
3247 * Allocate a server TID.
3248 */
Kumar Sanghvi8c044692013-12-18 16:38:25 +05303249 if (dev->rdev.lldi.enable_fw_ofld_conn &&
3250 ep->com.local_addr.ss_family == AF_INET)
Vipul Pandya830662f2013-07-04 16:10:47 +05303251 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
3252 cm_id->local_addr.ss_family, ep);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003253 else
Vipul Pandya830662f2013-07-04 16:10:47 +05303254 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
3255 cm_id->local_addr.ss_family, ep);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003256
Steve Wisecfdda9d2010-04-21 15:30:06 -07003257 if (ep->stid == -1) {
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003258 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003259 err = -ENOMEM;
3260 goto fail2;
3261 }
Vipul Pandya793dad92012-12-10 09:30:56 +00003262 insert_handle(dev, &dev->stid_idr, ep, ep->stid);
Steve Wise9eccfe12014-03-26 17:08:09 -05003263
3264 /* No port mapper available, go with the specified info */
3265 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
3266 sizeof(ep->com.mapped_local_addr));
3267
3268 c4iw_form_reg_msg(dev, &pm_reg_msg);
3269 iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
3270 if (iwpm_err) {
3271 PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
3272 __func__, iwpm_err);
3273 }
3274 if (iwpm_valid_pid() && !iwpm_err) {
3275 memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
3276 sizeof(ep->com.local_addr));
3277 iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
3278 if (iwpm_err)
3279 PDBG("%s: Port Mapper query fail (err = %d).\n",
3280 __func__, iwpm_err);
3281 else
3282 memcpy(&ep->com.mapped_local_addr,
3283 &pm_msg.mapped_loc_addr,
3284 sizeof(ep->com.mapped_local_addr));
3285 }
3286 if (iwpm_create_mapinfo(&ep->com.local_addr,
3287 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
3288 err = -ENOMEM;
3289 goto fail3;
3290 }
3291 print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");
3292
3293 set_bit(RELEASE_MAPINFO, &ep->com.flags);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003294 state_set(&ep->com, LISTEN);
Vipul Pandya830662f2013-07-04 16:10:47 +05303295 if (ep->com.local_addr.ss_family == AF_INET)
3296 err = create_server4(dev, ep);
3297 else
3298 err = create_server6(dev, ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003299 if (!err) {
3300 cm_id->provider_data = ep;
3301 goto out;
3302 }
Steve Wise9eccfe12014-03-26 17:08:09 -05003303
3304fail3:
Vipul Pandya830662f2013-07-04 16:10:47 +05303305 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3306 ep->com.local_addr.ss_family);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003307fail2:
3308 cm_id->rem_ref(cm_id);
3309 c4iw_put_ep(&ep->com);
3310fail1:
3311out:
3312 return err;
3313}
3314
3315int c4iw_destroy_listen(struct iw_cm_id *cm_id)
3316{
3317 int err;
3318 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
3319
3320 PDBG("%s ep %p\n", __func__, ep);
3321
3322 might_sleep();
3323 state_set(&ep->com, DEAD);
Vipul Pandya830662f2013-07-04 16:10:47 +05303324 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
3325 ep->com.local_addr.ss_family == AF_INET) {
Vipul Pandya1cab7752012-12-10 09:30:55 +00003326 err = cxgb4_remove_server_filter(
3327 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3328 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
3329 } else {
3330 c4iw_init_wr_wait(&ep->com.wr_wait);
Vipul Pandya830662f2013-07-04 16:10:47 +05303331 err = cxgb4_remove_server(
3332 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3333 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003334 if (err)
3335 goto done;
3336 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
3337 0, 0, __func__);
3338 }
Vipul Pandya793dad92012-12-10 09:30:56 +00003339 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
Vipul Pandya830662f2013-07-04 16:10:47 +05303340 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3341 ep->com.local_addr.ss_family);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003342done:
Steve Wisecfdda9d2010-04-21 15:30:06 -07003343 cm_id->rem_ref(cm_id);
3344 c4iw_put_ep(&ep->com);
3345 return err;
3346}
3347
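/*
 * Common disconnect path.  Depending on the current state we either
 * start a graceful half-close (with the endpoint timer running) or
 * send an abort; fatal errors release the endpoint immediately.
 */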
3348int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3349{
3350 int ret = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003351 int close = 0;
3352 int fatal = 0;
3353 struct c4iw_rdev *rdev;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003354
Steve Wise2f5b48c2010-09-10 11:15:36 -05003355 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003356
3357 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
3358 states[ep->com.state], abrupt);
3359
3360 rdev = &ep->com.dev->rdev;
3361 if (c4iw_fatal_error(rdev)) {
3362 fatal = 1;
Steve Wisebe13b2d2014-03-21 20:40:33 +05303363 close_complete_upcall(ep, -EIO);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003364 ep->com.state = DEAD;
3365 }
3366 switch (ep->com.state) {
3367 case MPA_REQ_WAIT:
3368 case MPA_REQ_SENT:
3369 case MPA_REQ_RCVD:
3370 case MPA_REP_SENT:
3371 case FPDU_MODE:
3372 close = 1;
3373 if (abrupt)
3374 ep->com.state = ABORTING;
3375 else {
3376 ep->com.state = CLOSING;
Steve Wiseca5a2202010-07-23 19:12:37 +00003377 start_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003378 }
3379 set_bit(CLOSE_SENT, &ep->com.flags);
3380 break;
3381 case CLOSING:
3382 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3383 close = 1;
3384 if (abrupt) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003385 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003386 ep->com.state = ABORTING;
3387 } else
3388 ep->com.state = MORIBUND;
3389 }
3390 break;
3391 case MORIBUND:
3392 case ABORTING:
3393 case DEAD:
3394 PDBG("%s ignoring disconnect ep %p state %u\n",
3395 __func__, ep, ep->com.state);
3396 break;
3397 default:
3398 BUG();
3399 break;
3400 }
3401
Steve Wisecfdda9d2010-04-21 15:30:06 -07003402 if (close) {
Steve Wise8da7e7a2011-06-14 20:59:27 +00003403 if (abrupt) {
Vipul Pandya793dad92012-12-10 09:30:56 +00003404 set_bit(EP_DISC_ABORT, &ep->com.history);
Steve Wisebe13b2d2014-03-21 20:40:33 +05303405 close_complete_upcall(ep, -ECONNRESET);
Steve Wise8da7e7a2011-06-14 20:59:27 +00003406 ret = send_abort(ep, NULL, gfp);
Vipul Pandya793dad92012-12-10 09:30:56 +00003407 } else {
3408 set_bit(EP_DISC_CLOSE, &ep->com.history);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003409 ret = send_halfclose(ep, gfp);
Vipul Pandya793dad92012-12-10 09:30:56 +00003410 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07003411 if (ret)
3412 fatal = 1;
3413 }
Steve Wise8da7e7a2011-06-14 20:59:27 +00003414 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003415 if (fatal)
3416 release_ep_resources(ep);
3417 return ret;
3418}
3419
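/*
 * Active-side reply to an fw_ofld_connection_wr.  FW_ENOMEM and
 * FW_EADDRINUSE are retried up to ACT_OPEN_RETRY_COUNT; anything else
 * fails the connect, reports it upwards and tears the endpoint down.
 */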
Vipul Pandya1cab7752012-12-10 09:30:55 +00003420static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3421 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3422{
3423 struct c4iw_ep *ep;
Vipul Pandya793dad92012-12-10 09:30:56 +00003424 int atid = be32_to_cpu(req->tid);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003425
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003426 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3427 (__force u32) req->tid);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003428 if (!ep)
3429 return;
3430
3431 switch (req->retval) {
3432 case FW_ENOMEM:
Vipul Pandya793dad92012-12-10 09:30:56 +00003433 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3434 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3435 send_fw_act_open_req(ep, atid);
3436 return;
3437 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00003438 case FW_EADDRINUSE:
Vipul Pandya793dad92012-12-10 09:30:56 +00003439 set_bit(ACT_RETRY_INUSE, &ep->com.history);
3440 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3441 send_fw_act_open_req(ep, atid);
3442 return;
3443 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00003444 break;
3445 default:
3446 pr_info("%s unexpected ofld conn wr retval %d\n",
3447 __func__, req->retval);
3448 break;
3449 }
Vipul Pandya793dad92012-12-10 09:30:56 +00003450 pr_err("active ofld_connect_wr failure %d atid %d\n",
3451 req->retval, atid);
3452 mutex_lock(&dev->rdev.stats.lock);
3453 dev->rdev.stats.act_ofld_conn_fails++;
3454 mutex_unlock(&dev->rdev.stats.lock);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003455 connect_reply_upcall(ep, status2errno(req->retval));
Vipul Pandya793dad92012-12-10 09:30:56 +00003456 state_set(&ep->com, DEAD);
3457 remove_handle(dev, &dev->atid_idr, atid);
3458 cxgb4_free_atid(dev->rdev.lldi.tids, atid);
3459 dst_release(ep->dst);
3460 cxgb4_l2t_release(ep->l2t);
3461 c4iw_put_ep(&ep->com);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003462}
3463
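/*
 * Passive-side reply to an fw_ofld_connection_wr.  On success the
 * stored SYN skb is replayed through pass_accept_req(); on failure we
 * count the failure and free the skb.
 */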
3464static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3465 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3466{
3467 struct sk_buff *rpl_skb;
3468 struct cpl_pass_accept_req *cpl;
3469 int ret;
3470
Paul Bolle710a3112013-02-05 20:51:30 +00003471 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003472 BUG_ON(!rpl_skb);
3473 if (req->retval) {
3474 PDBG("%s passive open failure %d\n", __func__, req->retval);
Vipul Pandya793dad92012-12-10 09:30:56 +00003475 mutex_lock(&dev->rdev.stats.lock);
3476 dev->rdev.stats.pas_ofld_conn_fails++;
3477 mutex_unlock(&dev->rdev.stats.lock);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003478 kfree_skb(rpl_skb);
3479 } else {
3480 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
3481 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003482 (__force u32) htonl(
3483 (__force u32) req->tid)));
Vipul Pandya1cab7752012-12-10 09:30:55 +00003484 ret = pass_accept_req(dev, rpl_skb);
3485 if (!ret)
3486 kfree_skb(rpl_skb);
3487 }
3488 return;
3489}
3490
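/*
 * FW6_MSG work handler: CQE notifications go to the event dispatcher,
 * offload-connection replies are routed to the active or passive
 * handler based on the TCP state carried in the reply.
 */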
3491static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
Steve Wise2f5b48c2010-09-10 11:15:36 -05003492{
3493 struct cpl_fw6_msg *rpl = cplhdr(skb);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003494 struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
3495
3496 switch (rpl->type) {
3497 case FW6_TYPE_CQE:
3498 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
3499 break;
3500 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3501 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
3502 switch (req->t_state) {
3503 case TCP_SYN_SENT:
3504 active_ofld_conn_reply(dev, skb, req);
3505 break;
3506 case TCP_SYN_RECV:
3507 passive_ofld_conn_reply(dev, skb, req);
3508 break;
3509 default:
3510 pr_err("%s unexpected ofld conn wr state %d\n",
3511 __func__, req->t_state);
3512 break;
3513 }
3514 break;
3515 }
3516 return 0;
3517}
3518
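/*
 * Rebuild the received cpl_rx_pkt (a SYN redirected by a filter) into
 * a cpl_pass_accept_req in place, carrying over the parsed TCP
 * options, VLAN tag and header lengths.
 */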
3519static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
3520{
3521 u32 l2info;
Vipul Pandyaf079af72013-03-14 05:08:58 +00003522 u16 vlantag, len, hdr_len, eth_hdr_len;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003523 u8 intf;
3524 struct cpl_rx_pkt *cpl = cplhdr(skb);
3525 struct cpl_pass_accept_req *req;
3526 struct tcp_options_received tmp_opt;
Vipul Pandyaf079af72013-03-14 05:08:58 +00003527 struct c4iw_dev *dev;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003528
Vipul Pandyaf079af72013-03-14 05:08:58 +00003529 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
Vipul Pandya1cab7752012-12-10 09:30:55 +00003530 /* Store values from cpl_rx_pkt in temporary location. */
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003531 vlantag = (__force u16) cpl->vlan;
3532 len = (__force u16) cpl->len;
3533 l2info = (__force u32) cpl->l2info;
3534 hdr_len = (__force u16) cpl->hdr_len;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003535 intf = cpl->iff;
3536
3537 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
3538
3539 /*
3540	 * We need to parse the TCP options from the SYN packet
3541	 * to generate the cpl_pass_accept_req.
3542 */
3543 memset(&tmp_opt, 0, sizeof(tmp_opt));
3544 tcp_clear_options(&tmp_opt);
Christoph Paasch1a2c6182013-03-17 08:23:34 +00003545 tcp_parse_options(skb, &tmp_opt, 0, NULL);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003546
3547 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
3548 memset(req, 0, sizeof(*req));
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05303549 req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
3550 SYN_MAC_IDX_V(RX_MACIDX_G(
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003551 (__force int) htonl(l2info))) |
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05303552 SYN_XACT_MATCH_F);
Vipul Pandyaf079af72013-03-14 05:08:58 +00003553 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
Hariprasad Shenaibdc590b2015-01-08 21:38:16 -08003554 RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
3555 RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05303556 req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003557 (__force int) htonl(l2info))) |
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05303558 TCP_HDR_LEN_V(RX_TCPHDR_LEN_G(
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003559 (__force int) htons(hdr_len))) |
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05303560 IP_HDR_LEN_V(RX_IPHDR_LEN_G(
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003561 (__force int) htons(hdr_len))) |
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05303562 ETH_HDR_LEN_V(RX_ETHHDR_LEN_G(eth_hdr_len)));
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003563 req->vlan = (__force __be16) vlantag;
3564 req->len = (__force __be16) len;
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08003565 req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
3566 PASS_OPEN_TOS_V(tos));
Vipul Pandya1cab7752012-12-10 09:30:55 +00003567 req->tcpopt.mss = htons(tmp_opt.mss_clamp);
3568 if (tmp_opt.wscale_ok)
3569 req->tcpopt.wsf = tmp_opt.snd_wscale;
3570 req->tcpopt.tstamp = tmp_opt.saw_tstamp;
3571 if (tmp_opt.sack_ok)
3572 req->tcpopt.sack = 1;
3573 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
3574 return;
3575}
3576
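/*
 * Send an fw_ofld_connection_wr so firmware completes the passive
 * open for a filter-redirected SYN.  The original skb is stashed in
 * the cookie and replayed when the firmware reply arrives.
 */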
3577static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3578 __be32 laddr, __be16 lport,
3579 __be32 raddr, __be16 rport,
3580 u32 rcv_isn, u32 filter, u16 window,
3581 u32 rss_qid, u8 port_id)
3582{
3583 struct sk_buff *req_skb;
3584 struct fw_ofld_connection_wr *req;
3585 struct cpl_pass_accept_req *cpl = cplhdr(skb);
Steve Wise1ce1d472014-03-21 20:40:31 +05303586 int ret;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003587
3588 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
3589 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
3590 memset(req, 0, sizeof(*req));
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08003591 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05303592 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05303593 req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003594 req->le.filter = (__force __be32) filter;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003595 req->le.lport = lport;
3596 req->le.pport = rport;
3597 req->le.u.ipv4.lip = laddr;
3598 req->le.u.ipv4.pip = raddr;
3599 req->tcb.rcv_nxt = htonl(rcv_isn + 1);
3600 req->tcb.rcv_adv = htons(window);
3601 req->tcb.t_state_to_astid =
Hariprasad Shenai77a80e22014-11-21 12:52:01 +05303602 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
3603 FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
3604 FW_OFLD_CONNECTION_WR_ASTID_V(
Hariprasad Shenai6c53e932015-01-08 21:38:15 -08003605 PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
Vipul Pandya1cab7752012-12-10 09:30:55 +00003606
3607 /*
3608 * We store the qid in opt2 which will be used by the firmware
3609 * to send us the wr response.
3610 */
Anish Bhattd7990b02014-11-12 17:15:57 -08003611 req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
Vipul Pandya1cab7752012-12-10 09:30:55 +00003612
3613 /*
3614	 * We initialize the MSS index in the TCB to 0xF so that when
3615	 * the driver sends cpl_pass_accept_rpl the TCB picks up the
3616	 * correct value.  If this were 0, TP would ignore any
3617	 * value > 0 for the MSS index.
3618 */
Anish Bhattd7990b02014-11-12 17:15:57 -08003619 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
Hariprasad S6198dd82015-04-22 01:44:59 +05303620 req->cookie = (uintptr_t)skb;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003621
3622 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
Steve Wise1ce1d472014-03-21 20:40:31 +05303623 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
3624 if (ret < 0) {
3625 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
3626 ret);
3627 kfree_skb(skb);
3628 kfree_skb(req_skb);
3629 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00003630}
3631
3632/*
3633 * Handler for CPL_RX_PKT messages.  These arrive when a filter,
3634 * rather than a server TID, is used to redirect a SYN packet.
3635 * When packets hit the filter they are redirected to the offload
3636 * queue and the driver tries to establish the connection using a
3637 * firmware work request.
3638 */
3639static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3640{
3641 int stid;
3642 unsigned int filter;
3643 struct ethhdr *eh = NULL;
3644 struct vlan_ethhdr *vlan_eh = NULL;
3645 struct iphdr *iph;
3646 struct tcphdr *tcph;
3647 struct rss_header *rss = (void *)skb->data;
3648 struct cpl_rx_pkt *cpl = (void *)skb->data;
3649 struct cpl_pass_accept_req *req = (void *)(rss + 1);
3650 struct l2t_entry *e;
3651 struct dst_entry *dst;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003652 struct c4iw_ep *lep;
3653 u16 window;
3654 struct port_info *pi;
3655 struct net_device *pdev;
Vipul Pandyaf079af72013-03-14 05:08:58 +00003656 u16 rss_qid, eth_hdr_len;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003657 int step;
3658 u32 tx_chan;
3659 struct neighbour *neigh;
3660
3661 /* Drop all non-SYN packets */
Hariprasad Shenaibdc590b2015-01-08 21:38:16 -08003662 if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
Vipul Pandya1cab7752012-12-10 09:30:55 +00003663 goto reject;
3664
3665 /*
3666 * Drop all packets which did not hit the filter.
3667 * Unlikely to happen.
3668 */
3669 if (!(rss->filter_hit && rss->filter_tid))
3670 goto reject;
3671
3672 /*
3673 * Calculate the server tid from filter hit index from cpl_rx_pkt.
3674 */
Kumar Sanghvia4ea0252013-12-18 16:38:24 +05303675 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003676
3677 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
3678 if (!lep) {
3679 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
3680 goto reject;
3681 }
3682
Vipul Pandyaf079af72013-03-14 05:08:58 +00003683 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
Hariprasad Shenaibdc590b2015-01-08 21:38:16 -08003684 RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
3685 RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
Vipul Pandyaf079af72013-03-14 05:08:58 +00003686 if (eth_hdr_len == ETH_HLEN) {
Vipul Pandya1cab7752012-12-10 09:30:55 +00003687 eh = (struct ethhdr *)(req + 1);
3688 iph = (struct iphdr *)(eh + 1);
3689 } else {
3690 vlan_eh = (struct vlan_ethhdr *)(req + 1);
3691 iph = (struct iphdr *)(vlan_eh + 1);
3692 skb->vlan_tci = ntohs(cpl->vlan);
3693 }
3694
3695 if (iph->version != 0x4)
3696 goto reject;
3697
3698 tcph = (struct tcphdr *)(iph + 1);
3699 skb_set_network_header(skb, (void *)iph - (void *)rss);
3700 skb_set_transport_header(skb, (void *)tcph - (void *)rss);
3701 skb_get(skb);
3702
3703 PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
3704 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
3705 ntohs(tcph->source), iph->tos);
3706
Vipul Pandya830662f2013-07-04 16:10:47 +05303707 dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
3708 iph->tos);
3709 if (!dst) {
Vipul Pandya1cab7752012-12-10 09:30:55 +00003710 pr_err("%s - failed to find dst entry!\n",
3711 __func__);
3712 goto reject;
3713 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00003714 neigh = dst_neigh_lookup_skb(dst, skb);
3715
Zhouyi Zhouaaa0c232013-03-14 17:21:50 +00003716 if (!neigh) {
3717 pr_err("%s - failed to allocate neigh!\n",
3718 __func__);
3719 goto free_dst;
3720 }
3721
Vipul Pandya1cab7752012-12-10 09:30:55 +00003722 if (neigh->dev->flags & IFF_LOOPBACK) {
3723 pdev = ip_dev_find(&init_net, iph->daddr);
3724 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3725 pdev, 0);
3726 pi = (struct port_info *)netdev_priv(pdev);
3727 tx_chan = cxgb4_port_chan(pdev);
3728 dev_put(pdev);
3729 } else {
Vipul Pandya830662f2013-07-04 16:10:47 +05303730 pdev = get_real_dev(neigh->dev);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003731 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
Vipul Pandya830662f2013-07-04 16:10:47 +05303732 pdev, 0);
3733 pi = (struct port_info *)netdev_priv(pdev);
3734 tx_chan = cxgb4_port_chan(pdev);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003735 }
Steve Wiseebf00062014-03-19 17:44:40 +05303736 neigh_release(neigh);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003737 if (!e) {
3738 pr_err("%s - failed to allocate l2t entry!\n",
3739 __func__);
3740 goto free_dst;
3741 }
3742
3743 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
3744 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003745 window = (__force u16) htons((__force u16)tcph->window);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003746
3747	/* Calculate filter portion for LE region. */
Kumar Sanghvi41b4f862013-12-18 16:38:26 +05303748 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
3749 dev->rdev.lldi.ports[0],
3750 e));
Vipul Pandya1cab7752012-12-10 09:30:55 +00003751
3752 /*
3753 * Synthesize the cpl_pass_accept_req. We have everything except the
3754 * TID. Once firmware sends a reply with TID we update the TID field
3755 * in cpl and pass it through the regular cpl_pass_accept_req path.
3756 */
3757 build_cpl_pass_accept_req(skb, stid, iph->tos);
3758 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
3759 tcph->source, ntohl(tcph->seq), filter, window,
3760 rss_qid, pi->port_id);
3761 cxgb4_l2t_release(e);
3762free_dst:
3763 dst_release(dst);
3764reject:
Steve Wise2f5b48c2010-09-10 11:15:36 -05003765 return 0;
3766}
3767
Steve Wisecfdda9d2010-04-21 15:30:06 -07003768/*
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003769 * These are the real handlers that are called from a
3770 * work queue.
3771 */
3772static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
3773 [CPL_ACT_ESTABLISH] = act_establish,
3774 [CPL_ACT_OPEN_RPL] = act_open_rpl,
3775 [CPL_RX_DATA] = rx_data,
3776 [CPL_ABORT_RPL_RSS] = abort_rpl,
3777 [CPL_ABORT_RPL] = abort_rpl,
3778 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
3779 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
3780 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
3781 [CPL_PASS_ESTABLISH] = pass_establish,
3782 [CPL_PEER_CLOSE] = peer_close,
3783 [CPL_ABORT_REQ_RSS] = peer_abort,
3784 [CPL_CLOSE_CON_RPL] = close_con_rpl,
3785 [CPL_RDMA_TERMINATE] = terminate,
Steve Wise2f5b48c2010-09-10 11:15:36 -05003786 [CPL_FW4_ACK] = fw4_ack,
Vipul Pandya1cab7752012-12-10 09:30:55 +00003787 [CPL_FW6_MSG] = deferred_fw6_msg,
3788 [CPL_RX_PKT] = rx_pkt
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003789};
3790
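/*
 * An endpoint timer has fired: depending on the connection state,
 * report the timeout upwards and abort the connection.  ABORTING and
 * DEAD are benign races with stop_ep_timer() and are ignored.
 */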
3791static void process_timeout(struct c4iw_ep *ep)
3792{
3793 struct c4iw_qp_attributes attrs;
3794 int abort = 1;
3795
Steve Wise2f5b48c2010-09-10 11:15:36 -05003796 mutex_lock(&ep->com.mutex);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003797 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
3798 ep->com.state);
Vipul Pandya793dad92012-12-10 09:30:56 +00003799 set_bit(TIMEDOUT, &ep->com.history);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003800 switch (ep->com.state) {
3801 case MPA_REQ_SENT:
3802 __state_set(&ep->com, ABORTING);
3803 connect_reply_upcall(ep, -ETIMEDOUT);
3804 break;
3805 case MPA_REQ_WAIT:
3806 __state_set(&ep->com, ABORTING);
3807 break;
3808 case CLOSING:
3809 case MORIBUND:
3810 if (ep->com.cm_id && ep->com.qp) {
3811 attrs.next_state = C4IW_QP_STATE_ERROR;
3812 c4iw_modify_qp(ep->com.qp->rhp,
3813 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
3814 &attrs, 1);
3815 }
3816 __state_set(&ep->com, ABORTING);
Steve Wisebe13b2d2014-03-21 20:40:33 +05303817 close_complete_upcall(ep, -ETIMEDOUT);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003818 break;
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003819 case ABORTING:
3820 case DEAD:
3821
3822 /*
3823 * These states are expected if the ep timed out at the same
3824 * time as another thread was calling stop_ep_timer().
3825 * So we silently do nothing for these states.
3826 */
3827 abort = 0;
3828 break;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003829 default:
Julia Lawall76f267b2012-11-03 10:58:27 +00003830 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003831 __func__, ep, ep->hwtid, ep->com.state);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003832 abort = 0;
3833 }
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003834 if (abort)
3835 abort_connection(ep, NULL, GFP_KERNEL);
Steve Wisecc18b932014-04-24 14:31:53 -05003836 mutex_unlock(&ep->com.mutex);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003837 c4iw_put_ep(&ep->com);
3838}
3839
3840static void process_timedout_eps(void)
3841{
3842 struct c4iw_ep *ep;
3843
3844 spin_lock_irq(&timeout_lock);
3845 while (!list_empty(&timeout_list)) {
3846 struct list_head *tmp;
3847
3848 tmp = timeout_list.next;
3849 list_del(tmp);
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003850 tmp->next = NULL;
3851 tmp->prev = NULL;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003852 spin_unlock_irq(&timeout_lock);
3853 ep = list_entry(tmp, struct c4iw_ep, entry);
3854 process_timeout(ep);
3855 spin_lock_irq(&timeout_lock);
3856 }
3857 spin_unlock_irq(&timeout_lock);
3858}
3859
3860static void process_work(struct work_struct *work)
3861{
3862 struct sk_buff *skb = NULL;
3863 struct c4iw_dev *dev;
Dan Carpenterc1d73562010-05-31 14:00:53 +00003864 struct cpl_act_establish *rpl;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003865 unsigned int opcode;
3866 int ret;
3867
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003868 process_timedout_eps();
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003869 while ((skb = skb_dequeue(&rxq))) {
3870 rpl = cplhdr(skb);
3871 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
3872 opcode = rpl->ot.opcode;
3873
3874 BUG_ON(!work_handlers[opcode]);
3875 ret = work_handlers[opcode](dev, skb);
3876 if (!ret)
3877 kfree_skb(skb);
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003878 process_timedout_eps();
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003879 }
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003880}
3881
3882static DECLARE_WORK(skb_work, process_work);
3883
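/*
 * Timer callback: queue the endpoint on the timeout list (only if it
 * is not already there) and kick the work queue so the timeout is
 * handled in process context.
 */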
3884static void ep_timeout(unsigned long arg)
3885{
3886 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
Vipul Pandya1ec779c2013-01-07 13:11:56 +00003887 int kickit = 0;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003888
3889 spin_lock(&timeout_lock);
Vipul Pandya1ec779c2013-01-07 13:11:56 +00003890 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003891 /*
3892 * Only insert if it is not already on the list.
3893 */
3894 if (!ep->entry.next) {
3895 list_add_tail(&ep->entry, &timeout_list);
3896 kickit = 1;
3897 }
Vipul Pandya1ec779c2013-01-07 13:11:56 +00003898 }
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003899 spin_unlock(&timeout_lock);
Vipul Pandya1ec779c2013-01-07 13:11:56 +00003900 if (kickit)
3901 queue_work(workq, &skb_work);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003902}
3903
3904/*
Steve Wisecfdda9d2010-04-21 15:30:06 -07003905 * All the CM events are handled on a work queue so they run in a safe context.
3906 */
3907static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
3908{
3909
3910 /*
3911 * Save dev in the skb->cb area.
3912 */
3913 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
3914
3915 /*
3916 * Queue the skb and schedule the worker thread.
3917 */
3918 skb_queue_tail(&rxq, skb);
3919 queue_work(workq, &skb_work);
3920 return 0;
3921}
3922
3923static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
3924{
3925 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
3926
3927 if (rpl->status != CPL_ERR_NONE) {
3928 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
3929 "for tid %u\n", rpl->status, GET_TID(rpl));
3930 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05003931 kfree_skb(skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003932 return 0;
3933}
3934
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003935static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
3936{
3937 struct cpl_fw6_msg *rpl = cplhdr(skb);
3938 struct c4iw_wr_wait *wr_waitp;
3939 int ret;
3940
3941 PDBG("%s type %u\n", __func__, rpl->type);
3942
3943 switch (rpl->type) {
Vipul Pandya5be78ee2012-12-10 09:30:54 +00003944 case FW6_TYPE_WR_RPL:
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003945 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
Roland Dreierc8e081a2010-09-27 17:51:04 -07003946 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003947 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
Steve Wised9594d92011-05-09 22:06:22 -07003948 if (wr_waitp)
3949 c4iw_wake_up(wr_waitp, ret ? -ret : 0);
Steve Wise2f5b48c2010-09-10 11:15:36 -05003950 kfree_skb(skb);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003951 break;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00003952 case FW6_TYPE_CQE:
Vipul Pandya5be78ee2012-12-10 09:30:54 +00003953 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
Vipul Pandya1cab7752012-12-10 09:30:55 +00003954 sched(dev, skb);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00003955 break;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003956 default:
3957 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
3958 rpl->type);
Steve Wise2f5b48c2010-09-10 11:15:36 -05003959 kfree_skb(skb);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003960 break;
3961 }
3962 return 0;
3963}
3964
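/*
 * Entry point for CPL_ABORT_REQ_RSS upcalls: drop negative-advice
 * aborts, wake any thread blocked in rdma_init()/rdma_fini() right
 * away (unless an MPA v1 retry is pending), then defer the real abort
 * processing to the work queue.
 */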
Steve Wise8da7e7a2011-06-14 20:59:27 +00003965static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
3966{
3967 struct cpl_abort_req_rss *req = cplhdr(skb);
3968 struct c4iw_ep *ep;
3969 struct tid_info *t = dev->rdev.lldi.tids;
3970 unsigned int tid = GET_TID(req);
3971
3972 ep = lookup_tid(t, tid);
Steve Wise14b92222012-04-30 15:31:29 -05003973 if (!ep) {
3974 printk(KERN_WARNING MOD
3975 "Abort on non-existent endpoint, tid %d\n", tid);
3976 kfree_skb(skb);
3977 return 0;
3978 }
Steve Wise7a2cea22014-03-14 21:52:07 +05303979 if (is_neg_adv(req->status)) {
Hariprasad Shenaidd92b122014-07-21 20:55:13 +05303980 dev_warn(&dev->rdev.lldi.pdev->dev,
3981 "Negative advice on abort - tid %u status %d (%s)\n",
3982 ep->hwtid, req->status, neg_adv_str(req->status));
Steve Wise8da7e7a2011-06-14 20:59:27 +00003983 kfree_skb(skb);
3984 return 0;
3985 }
3986 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
3987 ep->com.state);
3988
3989 /*
3990 * Wake up any threads in rdma_init() or rdma_fini().
Vipul Pandya7c0a33d2013-01-07 13:11:58 +00003991 * However, if we are on MPAv2 and want to retry with MPAv1
3992 * then, don't wake up yet.
Steve Wise8da7e7a2011-06-14 20:59:27 +00003993 */
Vipul Pandya7c0a33d2013-01-07 13:11:58 +00003994 if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
3995 if (ep->com.state != MPA_REQ_SENT)
3996 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
3997 } else
3998 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wise8da7e7a2011-06-14 20:59:27 +00003999 sched(dev, skb);
4000 return 0;
4001}
4002
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07004003/*
4004 * Most upcalls from the T4 Core go to sched() to
4005 * schedule the processing on a work queue.
4006 */
4007c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
4008 [CPL_ACT_ESTABLISH] = sched,
4009 [CPL_ACT_OPEN_RPL] = sched,
4010 [CPL_RX_DATA] = sched,
4011 [CPL_ABORT_RPL_RSS] = sched,
4012 [CPL_ABORT_RPL] = sched,
4013 [CPL_PASS_OPEN_RPL] = sched,
4014 [CPL_CLOSE_LISTSRV_RPL] = sched,
4015 [CPL_PASS_ACCEPT_REQ] = sched,
4016 [CPL_PASS_ESTABLISH] = sched,
4017 [CPL_PEER_CLOSE] = sched,
4018 [CPL_CLOSE_CON_RPL] = sched,
Steve Wise8da7e7a2011-06-14 20:59:27 +00004019 [CPL_ABORT_REQ_RSS] = peer_abort_intr,
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07004020 [CPL_RDMA_TERMINATE] = sched,
4021 [CPL_FW4_ACK] = sched,
4022 [CPL_SET_TCB_RPL] = set_tcb_rpl,
Vipul Pandya1cab7752012-12-10 09:30:55 +00004023 [CPL_FW6_MSG] = fw6_msg,
4024 [CPL_RX_PKT] = sched
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07004025};
4026
Steve Wisecfdda9d2010-04-21 15:30:06 -07004027int __init c4iw_cm_init(void)
4028{
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07004029 spin_lock_init(&timeout_lock);
Steve Wisecfdda9d2010-04-21 15:30:06 -07004030 skb_queue_head_init(&rxq);
4031
4032 workq = create_singlethread_workqueue("iw_cxgb4");
4033 if (!workq)
4034 return -ENOMEM;
4035
Steve Wisecfdda9d2010-04-21 15:30:06 -07004036 return 0;
4037}
4038
Steve Wise46c13762014-06-20 14:26:25 -05004039void c4iw_cm_term(void)
Steve Wisecfdda9d2010-04-21 15:30:06 -07004040{
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07004041 WARN_ON(!list_empty(&timeout_list));
Steve Wisecfdda9d2010-04-21 15:30:06 -07004042 flush_workqueue(workq);
4043 destroy_workqueue(workq);
4044}