/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		 " compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

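/*
 * Arm the endpoint timer.  The timer takes its own reference on the
 * endpoint, which is dropped either in stop_ep_timer() or by the
 * timeout handler once it fires.
 */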
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

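/*
 * Derive the effective MSS for the connection from the negotiated TCP
 * options: take the MTU table entry for the negotiated MSS index,
 * subtract IP/TCP header overhead and the 12 bytes consumed by TCP
 * timestamps when they were negotiated, clamping to a 128 byte floor.
 */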
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
		   sizeof(struct iphdr) - sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
		print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
		iwpm_remove_mapinfo(&ep->com.local_addr,
				    &ep->com.mapped_local_addr);
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}

static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}

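/*
 * Look up an IPv4 route (and its neighbour) for the 4-tuple, accepting
 * it only if the egress device is one of our ports or a loopback device.
 */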
static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n) {
		dst_release(&rt->dst);
		return NULL;
	}
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		/* drop the neighbour and route references on the reject path */
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}

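/*
 * Send a FW_FLOWC_WR to the firmware describing this connection's tx
 * channel, port, ingress queue, initial sequence numbers, send buffer
 * and MSS before any MPA traffic is sent on the tid.
 */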
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

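/*
 * Post a CPL_CLOSE_CON_REQ to initiate a graceful half-close of the
 * TCP connection for this endpoint.
 */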
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * c4iw_form_pm_msg - Form a port mapper message with mapping info
 */
static void c4iw_form_pm_msg(struct c4iw_ep *ep,
			     struct iwpm_sa_data *pm_msg)
{
	memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
}

/*
 * c4iw_form_reg_msg - Form a port mapper message with dev info
 */
static void c4iw_form_reg_msg(struct c4iw_dev *dev,
			      struct iwpm_dev_data *pm_msg)
{
	memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
	memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
	       IWPM_IFNAME_SIZE);
}

static void c4iw_record_pm_msg(struct c4iw_ep *ep,
			       struct iwpm_sa_data *pm_msg)
{
	memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
	       sizeof(ep->com.mapped_remote_addr));
}

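/*
 * Pick the MTU table index that yields the best 8-byte-aligned TCP
 * payload size for this path MTU, accounting for IP/TCP headers and
 * the TCP timestamp option when it is in use.
 */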
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts)
{
	unsigned short hdr_size = sizeof(struct iphdr) +
				  sizeof(struct tcphdr) +
				  (use_ts ? 12 : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}

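/*
 * Build and send the active open request (CPL_ACT_OPEN_REQ or the
 * T5/IPv6 variants) that asks the hardware to initiate the TCP
 * connection for this endpoint, using the port-mapped addresses.
 */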
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct cpl_t5_act_open_req *t5_req;
	struct cpl_act_open_req6 *req6;
	struct cpl_t5_act_open_req6 *t5_req6;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen;
	int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
		     sizeof(struct cpl_act_open_req) :
		     sizeof(struct cpl_t5_act_open_req);
	int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
		     sizeof(struct cpl_act_open_req6) :
		     sizeof(struct cpl_t5_act_open_req6);
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.mapped_local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.mapped_remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_remote_addr;
	int win;

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
		roundup(sizev4, 16) :
		roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_MASK)
		win = RCV_BUFSIZ_MASK;

	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(win);
	opt2 = RX_CHANNEL(0) |
	       CCTRL_ECN(enable_ecn) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		opt2 |= T5_OPT_2_VALID;
		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
	}
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
		if (ep->com.remote_addr.ss_family == AF_INET) {
			req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			OPCODE_TID(req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			req->local_port = la->sin_port;
			req->peer_port = ra->sin_port;
			req->local_ip = la->sin_addr.s_addr;
			req->peer_ip = ra->sin_addr.s_addr;
			req->opt0 = cpu_to_be64(opt0);
			req->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);

			INIT_TP_WR(req6, 0);
			OPCODE_TID(req6) = cpu_to_be32(
					   MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					   ((ep->rss_qid<<14)|ep->atid)));
			req6->local_port = la6->sin6_port;
			req6->peer_port = ra6->sin6_port;
			req6->local_ip_hi = *((__be64 *)
					      (la6->sin6_addr.s6_addr));
			req6->local_ip_lo = *((__be64 *)
					      (la6->sin6_addr.s6_addr + 8));
			req6->peer_ip_hi = *((__be64 *)
					     (ra6->sin6_addr.s6_addr));
			req6->peer_ip_lo = *((__be64 *)
					     (ra6->sin6_addr.s6_addr + 8));
			req6->opt0 = cpu_to_be64(opt0);
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		}
	} else {
		u32 isn = (prandom_u32() & ~7UL) - 1;

		opt2 |= T5_OPT_2_VALID;
		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
		if (peer2peer)
			isn += 4;

		if (ep->com.remote_addr.ss_family == AF_INET) {
			t5_req = (struct cpl_t5_act_open_req *)
				 skb_put(skb, wrlen);
			INIT_TP_WR(t5_req, 0);
			OPCODE_TID(t5_req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			t5_req->local_port = la->sin_port;
			t5_req->peer_port = ra->sin_port;
			t5_req->local_ip = la->sin_addr.s_addr;
			t5_req->peer_ip = ra->sin_addr.s_addr;
			t5_req->opt0 = cpu_to_be64(opt0);
			t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t)));
			t5_req->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__,
			     be32_to_cpu(t5_req->rsvd));
			t5_req->opt2 = cpu_to_be32(opt2);
		} else {
			t5_req6 = (struct cpl_t5_act_open_req6 *)
				  skb_put(skb, wrlen);
			INIT_TP_WR(t5_req6, 0);
			OPCODE_TID(t5_req6) = cpu_to_be32(
					      MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					      ((ep->rss_qid<<14)|ep->atid)));
			t5_req6->local_port = la6->sin6_port;
			t5_req6->peer_port = ra6->sin6_port;
			t5_req6->local_ip_hi = *((__be64 *)
						 (la6->sin6_addr.s6_addr));
			t5_req6->local_ip_lo = *((__be64 *)
						 (la6->sin6_addr.s6_addr + 8));
			t5_req6->peer_ip_hi = *((__be64 *)
						(ra6->sin6_addr.s6_addr));
			t5_req6->peer_ip_lo = *((__be64 *)
						(ra6->sin6_addr.s6_addr + 8));
			t5_req6->opt0 = cpu_to_be64(opt0);
			t5_req6->params = (__force __be64)cpu_to_be32(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			t5_req6->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__,
			     be32_to_cpu(t5_req6->rsvd));
			t5_req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

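/*
 * Send the MPA start request (with any private data) as immediate data
 * in a FW_OFLD_TX_DATA_WR, then arm the endpoint timer and move the
 * endpoint to MPA_REQ_SENT.
 */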
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return;
}

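/*
 * Reply to an MPA start request with an MPA reply that has the reject
 * bit set, optionally carrying consumer private data.
 */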
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

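/*
 * Send the accepting side's MPA reply, carrying the negotiated CRC,
 * marker and (for MPA v2) IRD/ORD settings plus any private data, and
 * move the endpoint to MPA_REP_SENT.
 */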
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

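/*
 * The hardware has completed the active open TCP handshake: record the
 * hardware tid and initial sequence numbers, free the atid, and kick
 * off MPA negotiation by sending the FLOWC and MPA start request.
 */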
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);
	mutex_unlock(&ep->com.mutex);
	return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	__state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
	}
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

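/*
 * Return RX credits to the hardware with a CPL_RX_DATA_ACK, including
 * any receive-window overage that could not be expressed in the
 * RCV_BUFSIZ field at connection setup.
 */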
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

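/*
 * Parse the peer's MPA start reply on the active side: accumulate and
 * validate the header and private data, negotiate the MPA version and
 * IRD/ORD values, and move the QP to RTS.  Returns non-zero when the
 * caller should disconnect.
 */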
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

1334 if (mpa->flags & MPA_REJECT) {
1335 err = -ECONNREFUSED;
1336 goto err;
1337 }
1338
1339 /*
1340 * If we get here we have accumulated the entire mpa
1341 * start reply message including private data. And
1342 * the MPA header is valid.
1343 */
Steve Wisec529fb52014-03-21 20:40:37 +05301344 __state_set(&ep->com, FPDU_MODE);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001345 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1346 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1347 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301348 ep->mpa_attr.version = mpa->revision;
1349 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1350
1351 if (mpa->revision == 2) {
1352 ep->mpa_attr.enhanced_rdma_conn =
1353 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1354 if (ep->mpa_attr.enhanced_rdma_conn) {
1355 mpa_v2_params = (struct mpa_v2_conn_params *)
1356 (ep->mpa_pkt + sizeof(*mpa));
1357 resp_ird = ntohs(mpa_v2_params->ird) &
1358 MPA_V2_IRD_ORD_MASK;
1359 resp_ord = ntohs(mpa_v2_params->ord) &
1360 MPA_V2_IRD_ORD_MASK;
1361
1362 /*
1363 * This is a double-check. Ideally, below checks are
1364 * not required since ird/ord stuff has been taken
1365 * care of in c4iw_accept_cr
1366 */
1367 if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
1368 err = -ENOMEM;
1369 ep->ird = resp_ord;
1370 ep->ord = resp_ird;
1371 insuff_ird = 1;
1372 }
1373
1374 if (ntohs(mpa_v2_params->ird) &
1375 MPA_V2_PEER2PEER_MODEL) {
1376 if (ntohs(mpa_v2_params->ord) &
1377 MPA_V2_RDMA_WRITE_RTR)
1378 ep->mpa_attr.p2p_type =
1379 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1380 else if (ntohs(mpa_v2_params->ord) &
1381 MPA_V2_RDMA_READ_RTR)
1382 ep->mpa_attr.p2p_type =
1383 FW_RI_INIT_P2PTYPE_READ_REQ;
1384 }
1385 }
1386 } else if (mpa->revision == 1)
1387 if (peer2peer)
1388 ep->mpa_attr.p2p_type = p2p_type;
1389
Steve Wisecfdda9d2010-04-21 15:30:06 -07001390 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301391 "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
1392 "%d\n", __func__, ep->mpa_attr.crc_enabled,
1393 ep->mpa_attr.recv_marker_enabled,
1394 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1395 ep->mpa_attr.p2p_type, p2p_type);
1396
1397 /*
1398 * If responder's RTR does not match with that of initiator, assign
1399 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
1400 * generated when moving QP to RTS state.
1401 * A TERM message will be sent after QP has moved to RTS state
1402 */
Kumar Sanghvi91018f82012-02-25 17:45:02 -08001403 if ((ep->mpa_attr.version == 2) && peer2peer &&
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301404 (ep->mpa_attr.p2p_type != p2p_type)) {
1405 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1406 rtr_mismatch = 1;
1407 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001408
1409 attrs.mpa_attr = ep->mpa_attr;
1410 attrs.max_ird = ep->ird;
1411 attrs.max_ord = ep->ord;
1412 attrs.llp_stream_handle = ep;
1413 attrs.next_state = C4IW_QP_STATE_RTS;
1414
1415 mask = C4IW_QP_ATTR_NEXT_STATE |
1416 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
1417 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
1418
1419 /* bind QP and TID with INIT_WR */
1420 err = c4iw_modify_qp(ep->com.qp->rhp,
1421 ep->com.qp, mask, &attrs, 1);
1422 if (err)
1423 goto err;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301424
1425 /*
1426 * If responder's RTR requirement did not match with what initiator
1427 * supports, generate TERM message
1428 */
1429 if (rtr_mismatch) {
1430 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
1431 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1432 attrs.ecode = MPA_NOMATCH_RTR;
1433 attrs.next_state = C4IW_QP_STATE_TERMINATE;
Steve Wisecc18b932014-04-24 14:31:53 -05001434 attrs.send_term = 1;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301435 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
Steve Wisecc18b932014-04-24 14:31:53 -05001436 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301437 err = -ENOMEM;
Steve Wisecc18b932014-04-24 14:31:53 -05001438 disconnect = 1;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301439 goto out;
1440 }
1441
 1442	/*
 1443	 * Generate a TERM if the initiator's IRD is not sufficient for the
 1444	 * responder-provided ORD. Currently we behave the same way even when
 1445	 * the responder-provided IRD is insufficient with respect to the
 1446	 * initiator's ORD.
 1447	 */
1448 if (insuff_ird) {
1449 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
1450 __func__);
1451 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1452 attrs.ecode = MPA_INSUFF_IRD;
1453 attrs.next_state = C4IW_QP_STATE_TERMINATE;
Steve Wisecc18b932014-04-24 14:31:53 -05001454 attrs.send_term = 1;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301455 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
Steve Wisecc18b932014-04-24 14:31:53 -05001456 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301457 err = -ENOMEM;
Steve Wisecc18b932014-04-24 14:31:53 -05001458 disconnect = 1;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301459 goto out;
1460 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001461 goto out;
1462err:
Steve Wisec529fb52014-03-21 20:40:37 +05301463 __state_set(&ep->com, ABORTING);
Steve Wiseb21ef162010-06-10 19:02:55 +00001464 send_abort(ep, skb, GFP_KERNEL);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001465out:
1466 connect_reply_upcall(ep, err);
Steve Wisecc18b932014-04-24 14:31:53 -05001467 return disconnect;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001468}
1469
1470static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1471{
1472 struct mpa_message *mpa;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301473 struct mpa_v2_conn_params *mpa_v2_params;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001474 u16 plen;
1475
1476 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1477
Steve Wisecfdda9d2010-04-21 15:30:06 -07001478 /*
1479 * If we get more than the supported amount of private data
1480 * then we must fail this connection.
1481 */
1482 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001483 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001484 abort_connection(ep, skb, GFP_KERNEL);
1485 return;
1486 }
1487
1488 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1489
1490 /*
1491 * Copy the new data into our accumulation buffer.
1492 */
1493 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1494 skb->len);
1495 ep->mpa_pkt_len += skb->len;
1496
1497 /*
1498 * If we don't even have the mpa message, then bail.
 1499	 * We'll continue processing when more data arrives.
1500 */
1501 if (ep->mpa_pkt_len < sizeof(*mpa))
1502 return;
1503
1504 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001505 mpa = (struct mpa_message *) ep->mpa_pkt;
1506
1507 /*
1508 * Validate MPA Header.
1509 */
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301510 if (mpa->revision > mpa_rev) {
1511 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
1512 " Received = %d\n", __func__, mpa_rev, mpa->revision);
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001513 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001514 abort_connection(ep, skb, GFP_KERNEL);
1515 return;
1516 }
1517
1518 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001519 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001520 abort_connection(ep, skb, GFP_KERNEL);
1521 return;
1522 }
1523
1524 plen = ntohs(mpa->private_data_size);
1525
1526 /*
1527 * Fail if there's too much private data.
1528 */
1529 if (plen > MPA_MAX_PRIVATE_DATA) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001530 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001531 abort_connection(ep, skb, GFP_KERNEL);
1532 return;
1533 }
1534
 1535	/*
 1536	 * Fail if we have received more data than plen plus the MPA header account for.
 1537	 */
1538 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001539 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001540 abort_connection(ep, skb, GFP_KERNEL);
1541 return;
1542 }
1543 ep->plen = (u8) plen;
1544
1545 /*
1546 * If we don't have all the pdata yet, then bail.
1547 */
1548 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1549 return;
1550
1551 /*
1552 * If we get here we have accumulated the entire mpa
 1553	 * start request message including private data.
1554 */
1555 ep->mpa_attr.initiator = 0;
1556 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1557 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1558 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301559 ep->mpa_attr.version = mpa->revision;
1560 if (mpa->revision == 1)
1561 ep->tried_with_mpa_v1 = 1;
1562 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1563
1564 if (mpa->revision == 2) {
1565 ep->mpa_attr.enhanced_rdma_conn =
1566 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1567 if (ep->mpa_attr.enhanced_rdma_conn) {
1568 mpa_v2_params = (struct mpa_v2_conn_params *)
1569 (ep->mpa_pkt + sizeof(*mpa));
1570 ep->ird = ntohs(mpa_v2_params->ird) &
1571 MPA_V2_IRD_ORD_MASK;
1572 ep->ord = ntohs(mpa_v2_params->ord) &
1573 MPA_V2_IRD_ORD_MASK;
1574 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
1575 if (peer2peer) {
1576 if (ntohs(mpa_v2_params->ord) &
1577 MPA_V2_RDMA_WRITE_RTR)
1578 ep->mpa_attr.p2p_type =
1579 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1580 else if (ntohs(mpa_v2_params->ord) &
1581 MPA_V2_RDMA_READ_RTR)
1582 ep->mpa_attr.p2p_type =
1583 FW_RI_INIT_P2PTYPE_READ_REQ;
1584 }
1585 }
1586 } else if (mpa->revision == 1)
1587 if (peer2peer)
1588 ep->mpa_attr.p2p_type = p2p_type;
1589
Steve Wisecfdda9d2010-04-21 15:30:06 -07001590 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1591 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1592 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1593 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1594 ep->mpa_attr.p2p_type);
1595
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001596 /*
1597 * If the endpoint timer already expired, then we ignore
1598 * the start request. process_timeout() will abort
1599 * the connection.
1600 */
1601 if (!stop_ep_timer(ep)) {
1602 __state_set(&ep->com, MPA_REQ_RCVD);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001603
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001604 /* drive upcall */
1605 mutex_lock(&ep->parent_ep->com.mutex);
1606 if (ep->parent_ep->com.state != DEAD) {
1607 if (connect_request_upcall(ep))
1608 abort_connection(ep, skb, GFP_KERNEL);
1609 } else {
Steve Wisebe13b2d2014-03-21 20:40:33 +05301610 abort_connection(ep, skb, GFP_KERNEL);
Steve Wiseb33bd0c2014-04-09 09:38:25 -05001611 }
1612 mutex_unlock(&ep->parent_ep->com.mutex);
Steve Wisebe13b2d2014-03-21 20:40:33 +05301613 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001614 return;
1615}
1616
1617static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1618{
1619 struct c4iw_ep *ep;
1620 struct cpl_rx_data *hdr = cplhdr(skb);
1621 unsigned int dlen = ntohs(hdr->len);
1622 unsigned int tid = GET_TID(hdr);
1623 struct tid_info *t = dev->rdev.lldi.tids;
Vipul Pandya793dad92012-12-10 09:30:56 +00001624 __u8 status = hdr->status;
Steve Wisecc18b932014-04-24 14:31:53 -05001625 int disconnect = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001626
1627 ep = lookup_tid(t, tid);
Steve Wise977116c2014-03-21 20:40:36 +05301628 if (!ep)
1629 return 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001630 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1631 skb_pull(skb, sizeof(*hdr));
1632 skb_trim(skb, dlen);
Steve Wisec529fb52014-03-21 20:40:37 +05301633 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001634
Steve Wisecfdda9d2010-04-21 15:30:06 -07001635 /* update RX credits */
1636 update_rx_credits(ep, dlen);
1637
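	/*
	 * Streaming-mode data is only expected while the MPA exchange is in
	 * progress (MPA_REQ_SENT/MPA_REQ_WAIT). Data that arrives once the
	 * connection is in FPDU mode is unexpected and moves the QP to
	 * TERMINATE.
	 */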
Steve Wisec529fb52014-03-21 20:40:37 +05301638 switch (ep->com.state) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07001639 case MPA_REQ_SENT:
Vipul Pandya55abf8d2013-01-07 13:11:50 +00001640 ep->rcv_seq += dlen;
Steve Wisecc18b932014-04-24 14:31:53 -05001641 disconnect = process_mpa_reply(ep, skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001642 break;
1643 case MPA_REQ_WAIT:
Vipul Pandya55abf8d2013-01-07 13:11:50 +00001644 ep->rcv_seq += dlen;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001645 process_mpa_request(ep, skb);
1646 break;
Vipul Pandya15579672013-01-07 13:11:52 +00001647 case FPDU_MODE: {
1648 struct c4iw_qp_attributes attrs;
1649 BUG_ON(!ep->com.qp);
Vipul Pandyae8e5b922013-01-07 13:11:55 +00001650 if (status)
Vipul Pandya15579672013-01-07 13:11:52 +00001651 pr_err("%s Unexpected streaming data." \
Vipul Pandya04236df2013-01-07 13:11:54 +00001652 " qpid %u ep %p state %d tid %u status %d\n",
1653 __func__, ep->com.qp->wq.sq.qid, ep,
Steve Wisec529fb52014-03-21 20:40:37 +05301654 ep->com.state, ep->hwtid, status);
Steve Wise97d7ec02013-08-06 21:04:34 +05301655 attrs.next_state = C4IW_QP_STATE_TERMINATE;
Vipul Pandya15579672013-01-07 13:11:52 +00001656 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
Steve Wisecc18b932014-04-24 14:31:53 -05001657 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1658 disconnect = 1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001659 break;
1660 }
Vipul Pandya15579672013-01-07 13:11:52 +00001661 default:
1662 break;
1663 }
Steve Wisec529fb52014-03-21 20:40:37 +05301664 mutex_unlock(&ep->com.mutex);
Steve Wisecc18b932014-04-24 14:31:53 -05001665 if (disconnect)
1666 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001667 return 0;
1668}
1669
1670static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1671{
1672 struct c4iw_ep *ep;
1673 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001674 int release = 0;
1675 unsigned int tid = GET_TID(rpl);
1676 struct tid_info *t = dev->rdev.lldi.tids;
1677
1678 ep = lookup_tid(t, tid);
Vipul Pandya49840372012-05-18 15:29:29 +05301679 if (!ep) {
1680 printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
1681 return 0;
1682 }
Wei Yongjun92dd6c32012-09-07 06:51:23 +00001683 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
Steve Wise2f5b48c2010-09-10 11:15:36 -05001684 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001685 switch (ep->com.state) {
1686 case ABORTING:
Vipul Pandya91e9c0712013-01-07 13:11:51 +00001687 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001688 __state_set(&ep->com, DEAD);
1689 release = 1;
1690 break;
1691 default:
1692 printk(KERN_ERR "%s ep %p state %d\n",
1693 __func__, ep, ep->com.state);
1694 break;
1695 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05001696 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001697
1698 if (release)
1699 release_ep_resources(ep);
1700 return 0;
1701}
1702
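/*
 * Retry an active open by handing the connection setup to the firmware
 * with a FW_OFLD_CONNECTION_WR work request. Used from act_open_rpl()
 * when the open fails with CPL_ERR_TCAM_FULL.
 */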
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001703static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1704{
1705 struct sk_buff *skb;
1706 struct fw_ofld_connection_wr *req;
1707 unsigned int mtu_idx;
1708 int wscale;
Vipul Pandya830662f2013-07-04 16:10:47 +05301709 struct sockaddr_in *sin;
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301710 int win;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001711
1712 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1713 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
1714 memset(req, 0, sizeof(*req));
1715 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
1716 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
Kumar Sanghvi41b4f862013-12-18 16:38:26 +05301717 req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
1718 ep->com.dev->rdev.lldi.ports[0],
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001719 ep->l2t));
Steve Wise9eccfe12014-03-26 17:08:09 -05001720 sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05301721 req->le.lport = sin->sin_port;
1722 req->le.u.ipv4.lip = sin->sin_addr.s_addr;
Steve Wise9eccfe12014-03-26 17:08:09 -05001723 sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05301724 req->le.pport = sin->sin_port;
1725 req->le.u.ipv4.pip = sin->sin_addr.s_addr;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001726 req->tcb.t_state_to_astid =
1727 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
1728 V_FW_OFLD_CONNECTION_WR_ASTID(atid));
1729 req->tcb.cplrxdataack_cplpassacceptrpl =
1730 htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001731 req->tcb.tx_max = (__force __be32) jiffies;
Vipul Pandya793dad92012-12-10 09:30:56 +00001732 req->tcb.rcv_adv = htons(1);
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05301733 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
1734 enable_tcp_timestamps);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001735 wscale = compute_wscale(rcv_win);
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301736
1737 /*
1738 * Specify the largest window that will fit in opt0. The
1739 * remainder will be specified in the rx_data_ack.
1740 */
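	/* ep->rcv_win is in bytes; the RCV_BUFSIZ field is in 1KB units. */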
1741 win = ep->rcv_win >> 10;
1742 if (win > RCV_BUFSIZ_MASK)
1743 win = RCV_BUFSIZ_MASK;
1744
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001745 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001746 (nocong ? NO_CONG(1) : 0) |
1747 KEEP_ALIVE(1) |
1748 DELACK(1) |
1749 WND_SCALE(wscale) |
1750 MSS_IDX(mtu_idx) |
1751 L2T_IDX(ep->l2t->idx) |
1752 TX_CHAN(ep->tx_chan) |
1753 SMAC_SEL(ep->smac_idx) |
1754 DSCP(ep->tos) |
1755 ULP_MODE(ULP_MODE_TCPDDP) |
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301756 RCV_BUFSIZ(win));
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001757 req->tcb.opt2 = (__force __be32) (PACE(1) |
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001758 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
1759 RX_CHANNEL(0) |
1760 CCTRL_ECN(enable_ecn) |
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001761 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001762 if (enable_tcp_timestamps)
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001763 req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001764 if (enable_tcp_sack)
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001765 req->tcb.opt2 |= (__force __be32) SACK_EN(1);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001766 if (wscale && enable_tcp_window_scaling)
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001767 req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
1768 req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
1769 req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
Vipul Pandya793dad92012-12-10 09:30:56 +00001770 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
1771 set_bit(ACT_OFLD_CONN, &ep->com.history);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001772 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1773}
1774
Steve Wisecfdda9d2010-04-21 15:30:06 -07001775/*
1776 * Return whether a failed active open has allocated a TID
1777 */
1778static inline int act_open_has_tid(int status)
1779{
1780 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1781 status != CPL_ERR_ARP_MISS;
1782}
1783
Steve Wise7a2cea22014-03-14 21:52:07 +05301784/* Returns whether a CPL status conveys negative advice.
1785 */
1786static int is_neg_adv(unsigned int status)
1787{
1788 return status == CPL_ERR_RTX_NEG_ADVICE ||
1789 status == CPL_ERR_PERSIST_NEG_ADVICE ||
1790 status == CPL_ERR_KEEPALV_NEG_ADVICE;
1791}
1792
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301793static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
1794{
1795 ep->snd_win = snd_win;
1796 ep->rcv_win = rcv_win;
1797 PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
1798}
1799
Vipul Pandya793dad92012-12-10 09:30:56 +00001800#define ACT_OPEN_RETRY_COUNT 2
1801
Vipul Pandya830662f2013-07-04 16:10:47 +05301802static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
1803 struct dst_entry *dst, struct c4iw_dev *cdev,
1804 bool clear_mpa_v1)
1805{
1806 struct neighbour *n;
1807 int err, step;
1808 struct net_device *pdev;
1809
1810 n = dst_neigh_lookup(dst, peer_ip);
1811 if (!n)
1812 return -ENODEV;
1813
1814 rcu_read_lock();
1815 err = -ENOMEM;
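	/*
	 * The destination is a loopback device: find the adapter port netdev
	 * that actually owns the peer address and use it for the L2T entry,
	 * MTU and queue selection.
	 */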
1816 if (n->dev->flags & IFF_LOOPBACK) {
1817 if (iptype == 4)
1818 pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
1819 else if (IS_ENABLED(CONFIG_IPV6))
1820 for_each_netdev(&init_net, pdev) {
1821 if (ipv6_chk_addr(&init_net,
1822 (struct in6_addr *)peer_ip,
1823 pdev, 1))
1824 break;
1825 }
1826 else
1827 pdev = NULL;
1828
1829 if (!pdev) {
1830 err = -ENODEV;
1831 goto out;
1832 }
1833 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1834 n, pdev, 0);
1835 if (!ep->l2t)
1836 goto out;
1837 ep->mtu = pdev->mtu;
1838 ep->tx_chan = cxgb4_port_chan(pdev);
1839 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1840 step = cdev->rdev.lldi.ntxq /
1841 cdev->rdev.lldi.nchan;
1842 ep->txq_idx = cxgb4_port_idx(pdev) * step;
1843 step = cdev->rdev.lldi.nrxq /
1844 cdev->rdev.lldi.nchan;
1845 ep->ctrlq_idx = cxgb4_port_idx(pdev);
1846 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
1847 cxgb4_port_idx(pdev) * step];
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301848 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
Vipul Pandya830662f2013-07-04 16:10:47 +05301849 dev_put(pdev);
1850 } else {
1851 pdev = get_real_dev(n->dev);
1852 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1853 n, pdev, 0);
1854 if (!ep->l2t)
1855 goto out;
1856 ep->mtu = dst_mtu(dst);
Steve Wise11b8e222014-05-16 12:42:46 -05001857 ep->tx_chan = cxgb4_port_chan(pdev);
1858 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
Vipul Pandya830662f2013-07-04 16:10:47 +05301859 step = cdev->rdev.lldi.ntxq /
1860 cdev->rdev.lldi.nchan;
Steve Wise11b8e222014-05-16 12:42:46 -05001861 ep->txq_idx = cxgb4_port_idx(pdev) * step;
1862 ep->ctrlq_idx = cxgb4_port_idx(pdev);
Vipul Pandya830662f2013-07-04 16:10:47 +05301863 step = cdev->rdev.lldi.nrxq /
1864 cdev->rdev.lldi.nchan;
1865 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
Steve Wise11b8e222014-05-16 12:42:46 -05001866 cxgb4_port_idx(pdev) * step];
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05301867 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
Vipul Pandya830662f2013-07-04 16:10:47 +05301868
1869 if (clear_mpa_v1) {
1870 ep->retry_with_mpa_v1 = 0;
1871 ep->tried_with_mpa_v1 = 0;
1872 }
1873 }
1874 err = 0;
1875out:
1876 rcu_read_unlock();
1877
1878 neigh_release(n);
1879
1880 return err;
1881}
1882
Vipul Pandya793dad92012-12-10 09:30:56 +00001883static int c4iw_reconnect(struct c4iw_ep *ep)
1884{
1885 int err = 0;
Steve Wise24d44a32013-07-04 16:10:44 +05301886 struct sockaddr_in *laddr = (struct sockaddr_in *)
1887 &ep->com.cm_id->local_addr;
1888 struct sockaddr_in *raddr = (struct sockaddr_in *)
1889 &ep->com.cm_id->remote_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05301890 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
1891 &ep->com.cm_id->local_addr;
1892 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
1893 &ep->com.cm_id->remote_addr;
1894 int iptype;
1895 __u8 *ra;
Vipul Pandya793dad92012-12-10 09:30:56 +00001896
1897 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
1898 init_timer(&ep->timer);
1899
1900 /*
1901 * Allocate an active TID to initiate a TCP connection.
1902 */
1903 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
1904 if (ep->atid == -1) {
1905 pr_err("%s - cannot alloc atid.\n", __func__);
1906 err = -ENOMEM;
1907 goto fail2;
1908 }
1909 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
1910
1911 /* find a route */
Vipul Pandya830662f2013-07-04 16:10:47 +05301912 if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
1913 ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
1914 raddr->sin_addr.s_addr, laddr->sin_port,
1915 raddr->sin_port, 0);
1916 iptype = 4;
1917 ra = (__u8 *)&raddr->sin_addr;
1918 } else {
1919 ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
1920 raddr6->sin6_addr.s6_addr,
1921 laddr6->sin6_port, raddr6->sin6_port, 0,
1922 raddr6->sin6_scope_id);
1923 iptype = 6;
1924 ra = (__u8 *)&raddr6->sin6_addr;
1925 }
1926 if (!ep->dst) {
Vipul Pandya793dad92012-12-10 09:30:56 +00001927 pr_err("%s - cannot find route.\n", __func__);
1928 err = -EHOSTUNREACH;
1929 goto fail3;
1930 }
Vipul Pandya830662f2013-07-04 16:10:47 +05301931 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
1932 if (err) {
Vipul Pandya793dad92012-12-10 09:30:56 +00001933 pr_err("%s - cannot alloc l2e.\n", __func__);
Vipul Pandya793dad92012-12-10 09:30:56 +00001934 goto fail4;
1935 }
1936
1937 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
1938 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
1939 ep->l2t->idx);
1940
1941 state_set(&ep->com, CONNECTING);
1942 ep->tos = 0;
1943
1944 /* send connect request to rnic */
1945 err = send_connect(ep);
1946 if (!err)
1947 goto out;
1948
1949 cxgb4_l2t_release(ep->l2t);
1950fail4:
1951 dst_release(ep->dst);
1952fail3:
1953 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
1954 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
1955fail2:
 1956	/*
 1957	 * Remember to send a notification to the upper layer.
 1958	 * The upper layer is not aware that this is a re-connect
 1959	 * attempt, so it is still waiting for the response to its
 1960	 * first connect request.
 1961	 */
1962 connect_reply_upcall(ep, -ECONNRESET);
1963 c4iw_put_ep(&ep->com);
1964out:
1965 return err;
1966}
1967
Steve Wisecfdda9d2010-04-21 15:30:06 -07001968static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1969{
1970 struct c4iw_ep *ep;
1971 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1972 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1973 ntohl(rpl->atid_status)));
1974 struct tid_info *t = dev->rdev.lldi.tids;
1975 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
Vipul Pandya830662f2013-07-04 16:10:47 +05301976 struct sockaddr_in *la;
1977 struct sockaddr_in *ra;
1978 struct sockaddr_in6 *la6;
1979 struct sockaddr_in6 *ra6;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001980
1981 ep = lookup_atid(t, atid);
Steve Wise9eccfe12014-03-26 17:08:09 -05001982 la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
1983 ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
1984 la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
1985 ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001986
1987 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1988 status, status2errno(status));
1989
Steve Wise7a2cea22014-03-14 21:52:07 +05301990 if (is_neg_adv(status)) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07001991 printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1992 atid);
1993 return 0;
1994 }
1995
Vipul Pandya793dad92012-12-10 09:30:56 +00001996 set_bit(ACT_OPEN_RPL, &ep->com.history);
1997
Vipul Pandyad716a2a2012-05-18 15:29:31 +05301998 /*
1999 * Log interesting failures.
2000 */
2001 switch (status) {
2002 case CPL_ERR_CONN_RESET:
2003 case CPL_ERR_CONN_TIMEDOUT:
2004 break;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00002005 case CPL_ERR_TCAM_FULL:
Vipul Pandya830662f2013-07-04 16:10:47 +05302006 mutex_lock(&dev->rdev.stats.lock);
Vipul Pandya3b174d92013-03-14 05:09:03 +00002007 dev->rdev.stats.tcam_full++;
Vipul Pandya830662f2013-07-04 16:10:47 +05302008 mutex_unlock(&dev->rdev.stats.lock);
2009 if (ep->com.local_addr.ss_family == AF_INET &&
2010 dev->rdev.lldi.enable_fw_ofld_conn) {
Vipul Pandya793dad92012-12-10 09:30:56 +00002011 send_fw_act_open_req(ep,
2012 GET_TID_TID(GET_AOPEN_ATID(
2013 ntohl(rpl->atid_status))));
2014 return 0;
2015 }
2016 break;
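	/*
	 * The hardware reports that a connection with this 4-tuple already
	 * exists: free this attempt's atid, dst and L2T entry and retry,
	 * up to ACT_OPEN_RETRY_COUNT times.
	 */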
2017 case CPL_ERR_CONN_EXIST:
2018 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2019 set_bit(ACT_RETRY_INUSE, &ep->com.history);
2020 remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
2021 atid);
2022 cxgb4_free_atid(t, atid);
2023 dst_release(ep->dst);
2024 cxgb4_l2t_release(ep->l2t);
2025 c4iw_reconnect(ep);
2026 return 0;
2027 }
Vipul Pandya5be78ee2012-12-10 09:30:54 +00002028 break;
Vipul Pandyad716a2a2012-05-18 15:29:31 +05302029 default:
Vipul Pandya830662f2013-07-04 16:10:47 +05302030 if (ep->com.local_addr.ss_family == AF_INET) {
2031 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
2032 atid, status, status2errno(status),
2033 &la->sin_addr.s_addr, ntohs(la->sin_port),
2034 &ra->sin_addr.s_addr, ntohs(ra->sin_port));
2035 } else {
2036 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
2037 atid, status, status2errno(status),
2038 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
2039 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
2040 }
Vipul Pandyad716a2a2012-05-18 15:29:31 +05302041 break;
2042 }
2043
Steve Wisecfdda9d2010-04-21 15:30:06 -07002044 connect_reply_upcall(ep, status2errno(status));
2045 state_set(&ep->com, DEAD);
2046
2047 if (status && act_open_has_tid(status))
2048 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
2049
Vipul Pandya793dad92012-12-10 09:30:56 +00002050 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002051 cxgb4_free_atid(t, atid);
2052 dst_release(ep->dst);
2053 cxgb4_l2t_release(ep->l2t);
2054 c4iw_put_ep(&ep->com);
2055
2056 return 0;
2057}
2058
2059static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2060{
2061 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
2062 struct tid_info *t = dev->rdev.lldi.tids;
2063 unsigned int stid = GET_TID(rpl);
2064 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
2065
2066 if (!ep) {
Vipul Pandya1cab7752012-12-10 09:30:55 +00002067 PDBG("%s stid %d lookup failure!\n", __func__, stid);
2068 goto out;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002069 }
2070 PDBG("%s ep %p status %d error %d\n", __func__, ep,
2071 rpl->status, status2errno(rpl->status));
Steve Wised9594d92011-05-09 22:06:22 -07002072 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002073
Vipul Pandya1cab7752012-12-10 09:30:55 +00002074out:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002075 return 0;
2076}
2077
Steve Wisecfdda9d2010-04-21 15:30:06 -07002078static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2079{
2080 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
2081 struct tid_info *t = dev->rdev.lldi.tids;
2082 unsigned int stid = GET_TID(rpl);
2083 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
2084
2085 PDBG("%s ep %p\n", __func__, ep);
Steve Wised9594d92011-05-09 22:06:22 -07002086 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002087 return 0;
2088}
2089
Vipul Pandya830662f2013-07-04 16:10:47 +05302090static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
Steve Wisecfdda9d2010-04-21 15:30:06 -07002091 struct cpl_pass_accept_req *req)
2092{
2093 struct cpl_pass_accept_rpl *rpl;
2094 unsigned int mtu_idx;
2095 u64 opt0;
2096 u32 opt2;
2097 int wscale;
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302098 struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05302099 int win;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002100
2101 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2102 BUG_ON(skb_cloned(skb));
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302103
Steve Wisecfdda9d2010-04-21 15:30:06 -07002104 skb_get(skb);
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302105 rpl = cplhdr(skb);
2106 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
2107 skb_trim(skb, roundup(sizeof(*rpl5), 16));
2108 rpl5 = (void *)rpl;
2109 INIT_TP_WR(rpl5, ep->hwtid);
2110 } else {
2111 skb_trim(skb, sizeof(*rpl));
2112 INIT_TP_WR(rpl, ep->hwtid);
2113 }
2114 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
2115 ep->hwtid));
2116
2117 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
2118 enable_tcp_timestamps && req->tcpopt.tstamp);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002119 wscale = compute_wscale(rcv_win);
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05302120
2121 /*
2122 * Specify the largest window that will fit in opt0. The
2123 * remainder will be specified in the rx_data_ack.
2124 */
2125 win = ep->rcv_win >> 10;
2126 if (win > RCV_BUFSIZ_MASK)
2127 win = RCV_BUFSIZ_MASK;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00002128 opt0 = (nocong ? NO_CONG(1) : 0) |
2129 KEEP_ALIVE(1) |
Steve Wiseba6d3922010-06-23 15:46:49 +00002130 DELACK(1) |
Steve Wisecfdda9d2010-04-21 15:30:06 -07002131 WND_SCALE(wscale) |
2132 MSS_IDX(mtu_idx) |
2133 L2T_IDX(ep->l2t->idx) |
2134 TX_CHAN(ep->tx_chan) |
2135 SMAC_SEL(ep->smac_idx) |
Vipul Pandya5be78ee2012-12-10 09:30:54 +00002136 DSCP(ep->tos >> 2) |
Steve Wiseb48f3b92011-03-11 22:30:21 +00002137 ULP_MODE(ULP_MODE_TCPDDP) |
Hariprasad Shenaib408ff22014-06-06 21:40:44 +05302138 RCV_BUFSIZ(win);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002139 opt2 = RX_CHANNEL(0) |
2140 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
2141
2142 if (enable_tcp_timestamps && req->tcpopt.tstamp)
2143 opt2 |= TSTAMPS_EN(1);
2144 if (enable_tcp_sack && req->tcpopt.sack)
2145 opt2 |= SACK_EN(1);
2146 if (wscale && enable_tcp_window_scaling)
2147 opt2 |= WND_SCALE_EN(1);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00002148 if (enable_ecn) {
2149 const struct tcphdr *tcph;
2150 u32 hlen = ntohl(req->hdr_len);
2151
2152 tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
2153 G_IP_HDR_LEN(hlen);
2154 if (tcph->ece && tcph->cwr)
2155 opt2 |= CCTRL_ECN(1);
2156 }
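	/*
	 * On T5 the accept reply also carries a driver-chosen initial send
	 * sequence number (ISS) and selects the TAHOE congestion control
	 * algorithm.
	 */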
Steve Wise92e50112014-04-24 14:31:59 -05002157 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302158 u32 isn = (prandom_u32() & ~7UL) - 1;
Steve Wise92e50112014-04-24 14:31:59 -05002159 opt2 |= T5_OPT_2_VALID;
2160 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302161 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
2162 rpl5 = (void *)rpl;
2163 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
2164 if (peer2peer)
2165 isn += 4;
2166 rpl5->iss = cpu_to_be32(isn);
2167 PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
Steve Wise92e50112014-04-24 14:31:59 -05002168 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002169
Steve Wisecfdda9d2010-04-21 15:30:06 -07002170 rpl->opt0 = cpu_to_be64(opt0);
2171 rpl->opt2 = cpu_to_be32(opt2);
Steve Wised4f1a5c2010-07-23 19:12:32 +00002172 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
Steve Wiseb38a0ad2013-08-06 21:04:37 +05302173 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002174 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
2175
2176 return;
2177}
2178
Vipul Pandya830662f2013-07-04 16:10:47 +05302179static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
Steve Wisecfdda9d2010-04-21 15:30:06 -07002180{
Vipul Pandya830662f2013-07-04 16:10:47 +05302181 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002182 BUG_ON(skb_cloned(skb));
2183 skb_trim(skb, sizeof(struct cpl_tid_release));
2184 skb_get(skb);
2185 release_tid(&dev->rdev, hwtid, skb);
2186 return;
2187}
2188
Vipul Pandya830662f2013-07-04 16:10:47 +05302189static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
2190 __u8 *local_ip, __u8 *peer_ip,
Steve Wisecfdda9d2010-04-21 15:30:06 -07002191 __be16 *local_port, __be16 *peer_port)
2192{
2193 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
2194 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
2195 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
Vipul Pandya830662f2013-07-04 16:10:47 +05302196 struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002197 struct tcphdr *tcp = (struct tcphdr *)
2198 ((u8 *)(req + 1) + eth_len + ip_len);
2199
Vipul Pandya830662f2013-07-04 16:10:47 +05302200 if (ip->version == 4) {
2201 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
2202 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
2203 ntohs(tcp->dest));
2204 *iptype = 4;
2205 memcpy(peer_ip, &ip->saddr, 4);
2206 memcpy(local_ip, &ip->daddr, 4);
2207 } else {
2208 PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
2209 ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
2210 ntohs(tcp->dest));
2211 *iptype = 6;
2212 memcpy(peer_ip, ip6->saddr.s6_addr, 16);
2213 memcpy(local_ip, ip6->daddr.s6_addr, 16);
2214 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002215 *peer_port = tcp->source;
2216 *local_port = tcp->dest;
2217
2218 return;
2219}
2220
2221static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2222{
Vipul Pandya793dad92012-12-10 09:30:56 +00002223 struct c4iw_ep *child_ep = NULL, *parent_ep;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002224 struct cpl_pass_accept_req *req = cplhdr(skb);
2225 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
2226 struct tid_info *t = dev->rdev.lldi.tids;
2227 unsigned int hwtid = GET_TID(req);
2228 struct dst_entry *dst;
Vipul Pandya830662f2013-07-04 16:10:47 +05302229 __u8 local_ip[16], peer_ip[16];
Steve Wisecfdda9d2010-04-21 15:30:06 -07002230 __be16 local_port, peer_port;
David Miller3786cf12011-12-02 16:52:31 +00002231 int err;
Vipul Pandya1cab7752012-12-10 09:30:55 +00002232 u16 peer_mss = ntohs(req->tcpopt.mss);
Vipul Pandya830662f2013-07-04 16:10:47 +05302233 int iptype;
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302234 unsigned short hdrs;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002235
2236 parent_ep = lookup_stid(t, stid);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002237 if (!parent_ep) {
2238 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
2239 goto reject;
2240 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00002241
Steve Wisecfdda9d2010-04-21 15:30:06 -07002242 if (state_read(&parent_ep->com) != LISTEN) {
2243 printk(KERN_ERR "%s - listening ep not in LISTEN\n",
2244 __func__);
2245 goto reject;
2246 }
2247
Vipul Pandya830662f2013-07-04 16:10:47 +05302248 get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);
2249
Steve Wisecfdda9d2010-04-21 15:30:06 -07002250 /* Find output route */
Vipul Pandya830662f2013-07-04 16:10:47 +05302251 if (iptype == 4) {
2252 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
2253 , __func__, parent_ep, hwtid,
2254 local_ip, peer_ip, ntohs(local_port),
2255 ntohs(peer_port), peer_mss);
2256 dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
2257 local_port, peer_port,
2258 GET_POPEN_TOS(ntohl(req->tos_stid)));
2259 } else {
2260 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
2261 , __func__, parent_ep, hwtid,
2262 local_ip, peer_ip, ntohs(local_port),
2263 ntohs(peer_port), peer_mss);
2264 dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
2265 PASS_OPEN_TOS(ntohl(req->tos_stid)),
2266 ((struct sockaddr_in6 *)
2267 &parent_ep->com.local_addr)->sin6_scope_id);
2268 }
2269 if (!dst) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002270 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
2271 __func__);
2272 goto reject;
2273 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002274
2275 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
2276 if (!child_ep) {
2277 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
2278 __func__);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002279 dst_release(dst);
2280 goto reject;
2281 }
David Miller3786cf12011-12-02 16:52:31 +00002282
Vipul Pandya830662f2013-07-04 16:10:47 +05302283 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
David Miller3786cf12011-12-02 16:52:31 +00002284 if (err) {
2285 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
2286 __func__);
2287 dst_release(dst);
2288 kfree(child_ep);
2289 goto reject;
2290 }
2291
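	/*
	 * If the peer advertised an MSS, clamp our MTU so the MSS we end up
	 * using never exceeds the peer's value plus IP/TCP header overhead
	 * (12 extra bytes when TCP timestamps are negotiated).
	 */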
Hariprasad Shenai92e7ae72014-06-06 21:40:43 +05302292 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
2293 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
2294 if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
2295 child_ep->mtu = peer_mss + hdrs;
Vipul Pandya1cab7752012-12-10 09:30:55 +00002296
Steve Wisecfdda9d2010-04-21 15:30:06 -07002297 state_set(&child_ep->com, CONNECTING);
2298 child_ep->com.dev = dev;
2299 child_ep->com.cm_id = NULL;
Vipul Pandya830662f2013-07-04 16:10:47 +05302300 if (iptype == 4) {
2301 struct sockaddr_in *sin = (struct sockaddr_in *)
2302 &child_ep->com.local_addr;
2303 sin->sin_family = PF_INET;
2304 sin->sin_port = local_port;
2305 sin->sin_addr.s_addr = *(__be32 *)local_ip;
2306 sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
2307 sin->sin_family = PF_INET;
2308 sin->sin_port = peer_port;
2309 sin->sin_addr.s_addr = *(__be32 *)peer_ip;
2310 } else {
2311 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
2312 &child_ep->com.local_addr;
2313 sin6->sin6_family = PF_INET6;
2314 sin6->sin6_port = local_port;
2315 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2316 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
2317 sin6->sin6_family = PF_INET6;
2318 sin6->sin6_port = peer_port;
2319 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
2320 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002321 c4iw_get_ep(&parent_ep->com);
2322 child_ep->parent_ep = parent_ep;
2323 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002324 child_ep->dst = dst;
2325 child_ep->hwtid = hwtid;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002326
2327 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
David Miller3786cf12011-12-02 16:52:31 +00002328 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002329
2330 init_timer(&child_ep->timer);
2331 cxgb4_insert_tid(t, child_ep, hwtid);
Vipul Pandyab3de6cf2013-01-07 13:11:59 +00002332 insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
Vipul Pandya830662f2013-07-04 16:10:47 +05302333 accept_cr(child_ep, skb, req);
Vipul Pandya793dad92012-12-10 09:30:56 +00002334 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002335 goto out;
2336reject:
Vipul Pandya830662f2013-07-04 16:10:47 +05302337 reject_cr(dev, hwtid, skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002338out:
2339 return 0;
2340}
2341
2342static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
2343{
2344 struct c4iw_ep *ep;
2345 struct cpl_pass_establish *req = cplhdr(skb);
2346 struct tid_info *t = dev->rdev.lldi.tids;
2347 unsigned int tid = GET_TID(req);
2348
2349 ep = lookup_tid(t, tid);
2350 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2351 ep->snd_seq = be32_to_cpu(req->snd_isn);
2352 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2353
Vipul Pandya1cab7752012-12-10 09:30:55 +00002354 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
2355 ntohs(req->tcp_opt));
2356
Steve Wisecfdda9d2010-04-21 15:30:06 -07002357 set_emss(ep, ntohs(req->tcp_opt));
2358
2359 dst_confirm(ep->dst);
2360 state_set(&ep->com, MPA_REQ_WAIT);
2361 start_ep_timer(ep);
2362 send_flowc(ep, skb);
Vipul Pandya793dad92012-12-10 09:30:56 +00002363 set_bit(PASS_ESTAB, &ep->com.history);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002364
2365 return 0;
2366}
2367
2368static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2369{
2370 struct cpl_peer_close *hdr = cplhdr(skb);
2371 struct c4iw_ep *ep;
2372 struct c4iw_qp_attributes attrs;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002373 int disconnect = 1;
2374 int release = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002375 struct tid_info *t = dev->rdev.lldi.tids;
2376 unsigned int tid = GET_TID(hdr);
Steve Wise8da7e7a2011-06-14 20:59:27 +00002377 int ret;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002378
2379 ep = lookup_tid(t, tid);
2380 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2381 dst_confirm(ep->dst);
2382
Vipul Pandya793dad92012-12-10 09:30:56 +00002383 set_bit(PEER_CLOSE, &ep->com.history);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002384 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002385 switch (ep->com.state) {
2386 case MPA_REQ_WAIT:
2387 __state_set(&ep->com, CLOSING);
2388 break;
2389 case MPA_REQ_SENT:
2390 __state_set(&ep->com, CLOSING);
2391 connect_reply_upcall(ep, -ECONNRESET);
2392 break;
2393 case MPA_REQ_RCVD:
2394
2395 /*
2396 * We're gonna mark this puppy DEAD, but keep
2397 * the reference on it until the ULP accepts or
2398 * rejects the CR. Also wake up anyone waiting
2399 * in rdma connection migration (see c4iw_accept_cr()).
2400 */
2401 __state_set(&ep->com, CLOSING);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002402 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
Steve Wised9594d92011-05-09 22:06:22 -07002403 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002404 break;
2405 case MPA_REP_SENT:
2406 __state_set(&ep->com, CLOSING);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002407 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
Steve Wised9594d92011-05-09 22:06:22 -07002408 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002409 break;
2410 case FPDU_MODE:
Steve Wiseca5a2202010-07-23 19:12:37 +00002411 start_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002412 __state_set(&ep->com, CLOSING);
Steve Wise30c95c22011-05-09 22:06:22 -07002413 attrs.next_state = C4IW_QP_STATE_CLOSING;
Steve Wise8da7e7a2011-06-14 20:59:27 +00002414 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
Steve Wise30c95c22011-05-09 22:06:22 -07002415 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
Steve Wise8da7e7a2011-06-14 20:59:27 +00002416 if (ret != -ECONNRESET) {
2417 peer_close_upcall(ep);
2418 disconnect = 1;
2419 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002420 break;
2421 case ABORTING:
2422 disconnect = 0;
2423 break;
2424 case CLOSING:
2425 __state_set(&ep->com, MORIBUND);
2426 disconnect = 0;
2427 break;
2428 case MORIBUND:
Steve Wiseb33bd0c2014-04-09 09:38:25 -05002429 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002430 if (ep->com.cm_id && ep->com.qp) {
2431 attrs.next_state = C4IW_QP_STATE_IDLE;
2432 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2433 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2434 }
Steve Wisebe13b2d2014-03-21 20:40:33 +05302435 close_complete_upcall(ep, 0);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002436 __state_set(&ep->com, DEAD);
2437 release = 1;
2438 disconnect = 0;
2439 break;
2440 case DEAD:
2441 disconnect = 0;
2442 break;
2443 default:
2444 BUG_ON(1);
2445 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05002446 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002447 if (disconnect)
2448 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2449 if (release)
2450 release_ep_resources(ep);
2451 return 0;
2452}
2453
Steve Wisecfdda9d2010-04-21 15:30:06 -07002454static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2455{
2456 struct cpl_abort_req_rss *req = cplhdr(skb);
2457 struct c4iw_ep *ep;
2458 struct cpl_abort_rpl *rpl;
2459 struct sk_buff *rpl_skb;
2460 struct c4iw_qp_attributes attrs;
2461 int ret;
2462 int release = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002463 struct tid_info *t = dev->rdev.lldi.tids;
2464 unsigned int tid = GET_TID(req);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002465
2466 ep = lookup_tid(t, tid);
Steve Wise7a2cea22014-03-14 21:52:07 +05302467 if (is_neg_adv(req->status)) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002468 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
2469 ep->hwtid);
2470 return 0;
2471 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002472 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
2473 ep->com.state);
Vipul Pandya793dad92012-12-10 09:30:56 +00002474 set_bit(PEER_ABORT, &ep->com.history);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002475
2476 /*
2477 * Wake up any threads in rdma_init() or rdma_fini().
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302478	 * However, this is not needed if the connection state is
 2479	 * still MPA_REQ_SENT.
Steve Wise2f5b48c2010-09-10 11:15:36 -05002480 */
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302481 if (ep->com.state != MPA_REQ_SENT)
2482 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002483
2484 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002485 switch (ep->com.state) {
2486 case CONNECTING:
2487 break;
2488 case MPA_REQ_WAIT:
Steve Wiseb33bd0c2014-04-09 09:38:25 -05002489 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002490 break;
2491 case MPA_REQ_SENT:
Steve Wiseb33bd0c2014-04-09 09:38:25 -05002492 (void)stop_ep_timer(ep);
Vipul Pandyafe7e0a42013-01-07 13:11:57 +00002493 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302494 connect_reply_upcall(ep, -ECONNRESET);
2495 else {
 2496			/*
 2497			 * Don't send a notification upwards because we want to
 2498			 * retry with MPA v1 without the upper layers even
 2499			 * knowing it.
 2500			 *
 2501			 * Do some housekeeping so as to re-initiate the
 2502			 * connection.
 2503			 */
2504 PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
2505 mpa_rev);
2506 ep->retry_with_mpa_v1 = 1;
2507 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002508 break;
2509 case MPA_REP_SENT:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002510 break;
2511 case MPA_REQ_RCVD:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002512 break;
2513 case MORIBUND:
2514 case CLOSING:
Steve Wiseca5a2202010-07-23 19:12:37 +00002515 stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002516 /*FALLTHROUGH*/
2517 case FPDU_MODE:
2518 if (ep->com.cm_id && ep->com.qp) {
2519 attrs.next_state = C4IW_QP_STATE_ERROR;
2520 ret = c4iw_modify_qp(ep->com.qp->rhp,
2521 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2522 &attrs, 1);
2523 if (ret)
2524 printk(KERN_ERR MOD
2525 "%s - qp <- error failed!\n",
2526 __func__);
2527 }
2528 peer_abort_upcall(ep);
2529 break;
2530 case ABORTING:
2531 break;
2532 case DEAD:
2533 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002534 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002535 return 0;
2536 default:
2537 BUG_ON(1);
2538 break;
2539 }
2540 dst_confirm(ep->dst);
2541 if (ep->com.state != ABORTING) {
2542 __state_set(&ep->com, DEAD);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302543 /* we don't release if we want to retry with mpa_v1 */
2544 if (!ep->retry_with_mpa_v1)
2545 release = 1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002546 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05002547 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002548
2549 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
2550 if (!rpl_skb) {
2551 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
2552 __func__);
2553 release = 1;
2554 goto out;
2555 }
2556 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
2557 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
2558 INIT_TP_WR(rpl, ep->hwtid);
2559 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
2560 rpl->cmd = CPL_ABORT_NO_RST;
2561 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2562out:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002563 if (release)
2564 release_ep_resources(ep);
Vipul Pandyafe7e0a42013-01-07 13:11:57 +00002565 else if (ep->retry_with_mpa_v1) {
2566 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302567 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
2568 dst_release(ep->dst);
2569 cxgb4_l2t_release(ep->l2t);
2570 c4iw_reconnect(ep);
2571 }
2572
Steve Wisecfdda9d2010-04-21 15:30:06 -07002573 return 0;
2574}
2575
2576static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2577{
2578 struct c4iw_ep *ep;
2579 struct c4iw_qp_attributes attrs;
2580 struct cpl_close_con_rpl *rpl = cplhdr(skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002581 int release = 0;
2582 struct tid_info *t = dev->rdev.lldi.tids;
2583 unsigned int tid = GET_TID(rpl);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002584
2585 ep = lookup_tid(t, tid);
2586
2587 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2588 BUG_ON(!ep);
2589
2590 /* The cm_id may be null if we failed to connect */
Steve Wise2f5b48c2010-09-10 11:15:36 -05002591 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002592 switch (ep->com.state) {
2593 case CLOSING:
2594 __state_set(&ep->com, MORIBUND);
2595 break;
2596 case MORIBUND:
Steve Wiseb33bd0c2014-04-09 09:38:25 -05002597 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002598 if ((ep->com.cm_id) && (ep->com.qp)) {
2599 attrs.next_state = C4IW_QP_STATE_IDLE;
2600 c4iw_modify_qp(ep->com.qp->rhp,
2601 ep->com.qp,
2602 C4IW_QP_ATTR_NEXT_STATE,
2603 &attrs, 1);
2604 }
Steve Wisebe13b2d2014-03-21 20:40:33 +05302605 close_complete_upcall(ep, 0);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002606 __state_set(&ep->com, DEAD);
2607 release = 1;
2608 break;
2609 case ABORTING:
2610 case DEAD:
2611 break;
2612 default:
2613 BUG_ON(1);
2614 break;
2615 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05002616 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002617 if (release)
2618 release_ep_resources(ep);
2619 return 0;
2620}
2621
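/*
 * The peer sent an RDMA TERMINATE: move the associated QP to the
 * TERMINATE state so the error is surfaced to the ULP.
 */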
2622static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
2623{
Steve Wise0e42c1f2010-09-10 11:15:09 -05002624 struct cpl_rdma_terminate *rpl = cplhdr(skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002625 struct tid_info *t = dev->rdev.lldi.tids;
Steve Wise0e42c1f2010-09-10 11:15:09 -05002626 unsigned int tid = GET_TID(rpl);
2627 struct c4iw_ep *ep;
2628 struct c4iw_qp_attributes attrs;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002629
2630 ep = lookup_tid(t, tid);
Steve Wise0e42c1f2010-09-10 11:15:09 -05002631 BUG_ON(!ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002632
Steve Wise30c95c22011-05-09 22:06:22 -07002633 if (ep && ep->com.qp) {
Steve Wise0e42c1f2010-09-10 11:15:09 -05002634 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
2635 ep->com.qp->wq.sq.qid);
2636 attrs.next_state = C4IW_QP_STATE_TERMINATE;
2637 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2638 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2639 } else
Steve Wise30c95c22011-05-09 22:06:22 -07002640 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002641
Steve Wisecfdda9d2010-04-21 15:30:06 -07002642 return 0;
2643}
2644
2645/*
2646 * Upcall from the adapter indicating data has been transmitted.
 2647 * For us it's just the single MPA request or reply. We can now free
2648 * the skb holding the mpa message.
2649 */
2650static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
2651{
2652 struct c4iw_ep *ep;
2653 struct cpl_fw4_ack *hdr = cplhdr(skb);
2654 u8 credits = hdr->credits;
2655 unsigned int tid = GET_TID(hdr);
2656 struct tid_info *t = dev->rdev.lldi.tids;
2657
2658
2659 ep = lookup_tid(t, tid);
2660 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
2661 if (credits == 0) {
Joe Perchesaa1ad262010-10-25 19:44:22 -07002662 PDBG("%s 0 credit ack ep %p tid %u state %u\n",
2663 __func__, ep, ep->hwtid, state_read(&ep->com));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002664 return 0;
2665 }
2666
2667 dst_confirm(ep->dst);
2668 if (ep->mpa_skb) {
2669 PDBG("%s last streaming msg ack ep %p tid %u state %u "
2670 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
2671 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
2672 kfree_skb(ep->mpa_skb);
2673 ep->mpa_skb = NULL;
2674 }
2675 return 0;
2676}
2677
Steve Wisecfdda9d2010-04-21 15:30:06 -07002678int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2679{
Steve Wisea7db89e2014-03-21 20:40:35 +05302680 int err = 0;
2681 int disconnect = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002682 struct c4iw_ep *ep = to_ep(cm_id);
2683 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2684
Steve Wisea7db89e2014-03-21 20:40:35 +05302685 mutex_lock(&ep->com.mutex);
2686 if (ep->com.state == DEAD) {
2687 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002688 c4iw_put_ep(&ep->com);
2689 return -ECONNRESET;
2690 }
Vipul Pandya793dad92012-12-10 09:30:56 +00002691 set_bit(ULP_REJECT, &ep->com.history);
Steve Wisea7db89e2014-03-21 20:40:35 +05302692 BUG_ON(ep->com.state != MPA_REQ_RCVD);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002693 if (mpa_rev == 0)
2694 abort_connection(ep, NULL, GFP_KERNEL);
2695 else {
2696 err = send_mpa_reject(ep, pdata, pdata_len);
Steve Wisea7db89e2014-03-21 20:40:35 +05302697 disconnect = 1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002698 }
Steve Wisea7db89e2014-03-21 20:40:35 +05302699 mutex_unlock(&ep->com.mutex);
2700 if (disconnect)
2701 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002702 c4iw_put_ep(&ep->com);
2703 return 0;
2704}
2705
2706int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2707{
2708 int err;
2709 struct c4iw_qp_attributes attrs;
2710 enum c4iw_qp_attr_mask mask;
2711 struct c4iw_ep *ep = to_ep(cm_id);
2712 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2713 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2714
2715 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
Steve Wisea7db89e2014-03-21 20:40:35 +05302716
2717 mutex_lock(&ep->com.mutex);
2718 if (ep->com.state == DEAD) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002719 err = -ECONNRESET;
2720 goto err;
2721 }
2722
Steve Wisea7db89e2014-03-21 20:40:35 +05302723 BUG_ON(ep->com.state != MPA_REQ_RCVD);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002724 BUG_ON(!qp);
2725
Vipul Pandya793dad92012-12-10 09:30:56 +00002726 set_bit(ULP_ACCEPT, &ep->com.history);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07002727 if ((conn_param->ord > c4iw_max_read_depth) ||
2728 (conn_param->ird > c4iw_max_read_depth)) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002729 abort_connection(ep, NULL, GFP_KERNEL);
2730 err = -EINVAL;
2731 goto err;
2732 }
2733
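	/*
	 * For an MPA v2 enhanced connection, the ORD/IRD requested by the
	 * ULP must fit within the IRD/ORD the peer advertised during the
	 * MPA exchange; otherwise the connection is rejected and aborted.
	 */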
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302734 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2735 if (conn_param->ord > ep->ird) {
2736 ep->ird = conn_param->ird;
2737 ep->ord = conn_param->ord;
2738 send_mpa_reject(ep, conn_param->private_data,
2739 conn_param->private_data_len);
2740 abort_connection(ep, NULL, GFP_KERNEL);
2741 err = -ENOMEM;
2742 goto err;
2743 }
2744 if (conn_param->ird > ep->ord) {
2745 if (!ep->ord)
2746 conn_param->ird = 1;
2747 else {
2748 abort_connection(ep, NULL, GFP_KERNEL);
2749 err = -ENOMEM;
2750 goto err;
2751 }
2752 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002753
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302754 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002755 ep->ird = conn_param->ird;
2756 ep->ord = conn_param->ord;
2757
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302758 if (ep->mpa_attr.version != 2)
2759 if (peer2peer && ep->ird == 0)
2760 ep->ird = 1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002761
2762 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
2763
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302764 cm_id->add_ref(cm_id);
2765 ep->com.cm_id = cm_id;
2766 ep->com.qp = qp;
Vipul Pandya325abea2013-01-07 13:11:53 +00002767 ref_qp(ep);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302768
Steve Wisecfdda9d2010-04-21 15:30:06 -07002769 /* bind QP to EP and move to RTS */
2770 attrs.mpa_attr = ep->mpa_attr;
2771 attrs.max_ird = ep->ird;
2772 attrs.max_ord = ep->ord;
2773 attrs.llp_stream_handle = ep;
2774 attrs.next_state = C4IW_QP_STATE_RTS;
2775
2776 /* bind QP and TID with INIT_WR */
2777 mask = C4IW_QP_ATTR_NEXT_STATE |
2778 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2779 C4IW_QP_ATTR_MPA_ATTR |
2780 C4IW_QP_ATTR_MAX_IRD |
2781 C4IW_QP_ATTR_MAX_ORD;
2782
2783 err = c4iw_modify_qp(ep->com.qp->rhp,
2784 ep->com.qp, mask, &attrs, 1);
2785 if (err)
2786 goto err1;
2787 err = send_mpa_reply(ep, conn_param->private_data,
2788 conn_param->private_data_len);
2789 if (err)
2790 goto err1;
2791
Steve Wisea7db89e2014-03-21 20:40:35 +05302792 __state_set(&ep->com, FPDU_MODE);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002793 established_upcall(ep);
Steve Wisea7db89e2014-03-21 20:40:35 +05302794 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002795 c4iw_put_ep(&ep->com);
2796 return 0;
2797err1:
2798 ep->com.cm_id = NULL;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002799 cm_id->rem_ref(cm_id);
2800err:
Steve Wisea7db89e2014-03-21 20:40:35 +05302801 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002802 c4iw_put_ep(&ep->com);
2803 return err;
2804}
2805
Vipul Pandya830662f2013-07-04 16:10:47 +05302806static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
2807{
2808 struct in_device *ind;
2809 int found = 0;
2810 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
2811 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
2812
2813 ind = in_dev_get(dev->rdev.lldi.ports[0]);
2814 if (!ind)
2815 return -EADDRNOTAVAIL;
2816 for_primary_ifa(ind) {
2817 laddr->sin_addr.s_addr = ifa->ifa_address;
2818 raddr->sin_addr.s_addr = ifa->ifa_address;
2819 found = 1;
2820 break;
2821 }
2822 endfor_ifa(ind);
2823 in_dev_put(ind);
2824 return found ? 0 : -EADDRNOTAVAIL;
2825}
2826
2827static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
2828 unsigned char banned_flags)
2829{
2830 struct inet6_dev *idev;
2831 int err = -EADDRNOTAVAIL;
2832
2833 rcu_read_lock();
2834 idev = __in6_dev_get(dev);
2835 if (idev != NULL) {
2836 struct inet6_ifaddr *ifp;
2837
2838 read_lock_bh(&idev->lock);
2839 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2840 if (ifp->scope == IFA_LINK &&
2841 !(ifp->flags & banned_flags)) {
2842 memcpy(addr, &ifp->addr, 16);
2843 err = 0;
2844 break;
2845 }
2846 }
2847 read_unlock_bh(&idev->lock);
2848 }
2849 rcu_read_unlock();
2850 return err;
2851}
2852
2853static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
2854{
2855 struct in6_addr uninitialized_var(addr);
2856 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
2857 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
2858
2859 if (get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
2860 memcpy(la6->sin6_addr.s6_addr, &addr, 16);
2861 memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
2862 return 0;
2863 }
2864 return -EADDRNOTAVAIL;
2865}
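/*
 * Userspace analogue, illustrative only: pick_local_ipaddrs() substitutes
 * the port's primary IPv4 address when a loopback connect targets
 * INADDR_ANY.  A process without in_dev_get()/for_primary_ifa() could do
 * roughly the same with getifaddrs(3), as sketched below;
 * pick_local_ipv4() is a hypothetical helper, not a kernel function.
 */
#include <errno.h>
#include <ifaddrs.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int pick_local_ipv4(struct in_addr *out)
{
	struct ifaddrs *ifs, *ifa;
	int err = -EADDRNOTAVAIL;

	if (getifaddrs(&ifs))
		return -errno;
	for (ifa = ifs; ifa; ifa = ifa->ifa_next) {
		if (!ifa->ifa_addr || ifa->ifa_addr->sa_family != AF_INET)
			continue;
		if (ifa->ifa_flags & IFF_LOOPBACK)
			continue;	/* want a real port address */
		*out = ((struct sockaddr_in *)ifa->ifa_addr)->sin_addr;
		err = 0;
		break;
	}
	freeifaddrs(ifs);
	return err;
}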
2866
Steve Wisecfdda9d2010-04-21 15:30:06 -07002867int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2868{
Steve Wisecfdda9d2010-04-21 15:30:06 -07002869 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2870 struct c4iw_ep *ep;
David Miller3786cf12011-12-02 16:52:31 +00002871 int err = 0;
Steve Wise9eccfe12014-03-26 17:08:09 -05002872 struct sockaddr_in *laddr;
2873 struct sockaddr_in *raddr;
2874 struct sockaddr_in6 *laddr6;
2875 struct sockaddr_in6 *raddr6;
2876 struct iwpm_dev_data pm_reg_msg;
2877 struct iwpm_sa_data pm_msg;
Vipul Pandya830662f2013-07-04 16:10:47 +05302878 __u8 *ra;
2879 int iptype;
Steve Wise9eccfe12014-03-26 17:08:09 -05002880 int iwpm_err = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002881
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07002882 if ((conn_param->ord > c4iw_max_read_depth) ||
2883 (conn_param->ird > c4iw_max_read_depth)) {
2884 err = -EINVAL;
2885 goto out;
2886 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002887 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2888 if (!ep) {
2889 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2890 err = -ENOMEM;
2891 goto out;
2892 }
2893 init_timer(&ep->timer);
2894 ep->plen = conn_param->private_data_len;
2895 if (ep->plen)
2896 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2897 conn_param->private_data, ep->plen);
2898 ep->ird = conn_param->ird;
2899 ep->ord = conn_param->ord;
2900
2901 if (peer2peer && ep->ord == 0)
2902 ep->ord = 1;
2903
2904 cm_id->add_ref(cm_id);
2905 ep->com.dev = dev;
2906 ep->com.cm_id = cm_id;
2907 ep->com.qp = get_qhp(dev, conn_param->qpn);
Vipul Pandya830662f2013-07-04 16:10:47 +05302908 if (!ep->com.qp) {
2909 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
2910 err = -EINVAL;
Steve Wise9eccfe12014-03-26 17:08:09 -05002911 goto fail1;
Vipul Pandya830662f2013-07-04 16:10:47 +05302912 }
Vipul Pandya325abea2013-01-07 13:11:53 +00002913 ref_qp(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002914 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
2915 ep->com.qp, cm_id);
2916
2917 /*
2918 * Allocate an active TID to initiate a TCP connection.
2919 */
2920 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
2921 if (ep->atid == -1) {
2922 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
2923 err = -ENOMEM;
Steve Wise9eccfe12014-03-26 17:08:09 -05002924 goto fail1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002925 }
Vipul Pandya793dad92012-12-10 09:30:56 +00002926 insert_handle(dev, &dev->atid_idr, ep, ep->atid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002927
Steve Wise9eccfe12014-03-26 17:08:09 -05002928 memcpy(&ep->com.local_addr, &cm_id->local_addr,
2929 sizeof(ep->com.local_addr));
2930 memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
2931 sizeof(ep->com.remote_addr));
2932
2933 /* No port mapper available, go with the specified peer information */
2934 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
2935 sizeof(ep->com.mapped_local_addr));
2936 memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
2937 sizeof(ep->com.mapped_remote_addr));
2938
2939 c4iw_form_reg_msg(dev, &pm_reg_msg);
2940 iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
2941 if (iwpm_err) {
2942 PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
2943 __func__, iwpm_err);
2944 }
2945 if (iwpm_valid_pid() && !iwpm_err) {
2946 c4iw_form_pm_msg(ep, &pm_msg);
2947 iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
2948 if (iwpm_err)
2949 PDBG("%s: Port Mapper query fail (err = %d).\n",
2950 __func__, iwpm_err);
2951 else
2952 c4iw_record_pm_msg(ep, &pm_msg);
2953 }
2954 if (iwpm_create_mapinfo(&ep->com.local_addr,
2955 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
2956 iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
2957 err = -ENOMEM;
2958 goto fail1;
2959 }
2960 print_addr(&ep->com, __func__, "add_query/create_mapinfo");
2961 set_bit(RELEASE_MAPINFO, &ep->com.flags);
2962
2963 laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
2964 raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
2965 laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
2966 raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr;
2967
Vipul Pandya830662f2013-07-04 16:10:47 +05302968 if (cm_id->remote_addr.ss_family == AF_INET) {
2969 iptype = 4;
2970 ra = (__u8 *)&raddr->sin_addr;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002971
Vipul Pandya830662f2013-07-04 16:10:47 +05302972 /*
2973 * Handle loopback requests to INADDR_ANY.
2974 */
2975 if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
2976 err = pick_local_ipaddrs(dev, cm_id);
2977 if (err)
Steve Wise9eccfe12014-03-26 17:08:09 -05002978 goto fail1;
Vipul Pandya830662f2013-07-04 16:10:47 +05302979 }
2980
2981 /* find a route */
2982 PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
2983 __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
2984 ra, ntohs(raddr->sin_port));
2985 ep->dst = find_route(dev, laddr->sin_addr.s_addr,
2986 raddr->sin_addr.s_addr, laddr->sin_port,
2987 raddr->sin_port, 0);
2988 } else {
2989 iptype = 6;
2990 ra = (__u8 *)&raddr6->sin6_addr;
2991
2992 /*
2993 * Handle loopback requests to INADDR_ANY.
2994 */
2995 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
2996 err = pick_local_ip6addrs(dev, cm_id);
2997 if (err)
Steve Wise9eccfe12014-03-26 17:08:09 -05002998 goto fail1;
Vipul Pandya830662f2013-07-04 16:10:47 +05302999 }
3000
3001 /* find a route */
3002 PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
3003 __func__, laddr6->sin6_addr.s6_addr,
3004 ntohs(laddr6->sin6_port),
3005 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
3006 ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
3007 raddr6->sin6_addr.s6_addr,
3008 laddr6->sin6_port, raddr6->sin6_port, 0,
3009 raddr6->sin6_scope_id);
3010 }
3011 if (!ep->dst) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07003012 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
3013 err = -EHOSTUNREACH;
Steve Wise9eccfe12014-03-26 17:08:09 -05003014 goto fail2;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003015 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07003016
Vipul Pandya830662f2013-07-04 16:10:47 +05303017 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
David Miller3786cf12011-12-02 16:52:31 +00003018 if (err) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07003019 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
Steve Wise9eccfe12014-03-26 17:08:09 -05003020 goto fail3;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003021 }
3022
3023 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
3024 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
3025 ep->l2t->idx);
3026
3027 state_set(&ep->com, CONNECTING);
3028 ep->tos = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003029
3030 /* send connect request to rnic */
3031 err = send_connect(ep);
3032 if (!err)
3033 goto out;
3034
3035 cxgb4_l2t_release(ep->l2t);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003036fail3:
Steve Wise9eccfe12014-03-26 17:08:09 -05003037 dst_release(ep->dst);
3038fail2:
Vipul Pandya793dad92012-12-10 09:30:56 +00003039 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003040 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
Steve Wise9eccfe12014-03-26 17:08:09 -05003041fail1:
Steve Wisecfdda9d2010-04-21 15:30:06 -07003042 cm_id->rem_ref(cm_id);
3043 c4iw_put_ep(&ep->com);
3044out:
3045 return err;
3046}
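/*
 * Not driver code: a minimal, self-contained sketch of the fail1/fail2/
 * fail3 unwind pattern c4iw_connect() relies on - each failure label
 * releases exactly the resources acquired before the jump, in reverse
 * order.  Plain malloc()s stand in for the atid, the route and the l2t
 * entry.
 */
#include <errno.h>
#include <stdlib.h>

static int acquire_three(char **a, char **b, char **c)
{
	int err = -ENOMEM;

	*a = malloc(64);		/* think: alloc atid */
	if (!*a)
		goto out;
	*b = malloc(64);		/* think: find route */
	if (!*b)
		goto fail1;
	*c = malloc(64);		/* think: import l2t entry */
	if (!*c)
		goto fail2;
	return 0;			/* success: caller owns a, b and c */

fail2:
	free(*b);
fail1:
	free(*a);
out:
	return err;
}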
3047
Vipul Pandya830662f2013-07-04 16:10:47 +05303048static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3049{
3050 int err;
Steve Wise9eccfe12014-03-26 17:08:09 -05003051 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
3052 &ep->com.mapped_local_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05303053
3054 c4iw_init_wr_wait(&ep->com.wr_wait);
3055 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
3056 ep->stid, &sin6->sin6_addr,
3057 sin6->sin6_port,
3058 ep->com.dev->rdev.lldi.rxq_ids[0]);
3059 if (!err)
3060 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3061 &ep->com.wr_wait,
3062 0, 0, __func__);
3063 if (err)
3064 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
3065 err, ep->stid,
3066 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
3067 return err;
3068}
3069
3070static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3071{
3072 int err;
Steve Wise9eccfe12014-03-26 17:08:09 -05003073 struct sockaddr_in *sin = (struct sockaddr_in *)
3074 &ep->com.mapped_local_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +05303075
3076 if (dev->rdev.lldi.enable_fw_ofld_conn) {
3077 do {
3078 err = cxgb4_create_server_filter(
3079 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3080 sin->sin_addr.s_addr, sin->sin_port, 0,
3081 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
3082 if (err == -EBUSY) {
3083 set_current_state(TASK_UNINTERRUPTIBLE);
3084 schedule_timeout(usecs_to_jiffies(100));
3085 }
3086 } while (err == -EBUSY);
3087 } else {
3088 c4iw_init_wr_wait(&ep->com.wr_wait);
3089 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
3090 ep->stid, sin->sin_addr.s_addr, sin->sin_port,
3091 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
3092 if (!err)
3093 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3094 &ep->com.wr_wait,
3095 0, 0, __func__);
3096 }
3097 if (err)
3098 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
3099 , err, ep->stid,
3100 &sin->sin_addr, ntohs(sin->sin_port));
3101 return err;
3102}
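/*
 * Illustrative only: the retry-on-EBUSY idiom create_server4() uses when
 * installing a server filter, redone as a self-contained userspace
 * snippet.  try_install() is a hypothetical stand-in for
 * cxgb4_create_server_filter(); the 100us pause mirrors the driver's
 * schedule_timeout(usecs_to_jiffies(100)).
 */
#include <errno.h>
#include <time.h>

static int busy_left = 3;		/* pretend the slot frees up shortly */

static int try_install(void)
{
	return busy_left-- > 0 ? -EBUSY : 0;
}

static int install_with_retry(void)
{
	int err;

	do {
		err = try_install();
		if (err == -EBUSY) {
			struct timespec ts = { 0, 100 * 1000 };	/* 100us */

			nanosleep(&ts, NULL);
		}
	} while (err == -EBUSY);
	return err;
}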
3103
Steve Wisecfdda9d2010-04-21 15:30:06 -07003104int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3105{
3106 int err = 0;
3107 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3108 struct c4iw_listen_ep *ep;
Steve Wise9eccfe12014-03-26 17:08:09 -05003109 struct iwpm_dev_data pm_reg_msg;
3110 struct iwpm_sa_data pm_msg;
3111 int iwpm_err = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003112
Steve Wisecfdda9d2010-04-21 15:30:06 -07003113 might_sleep();
3114
3115 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3116 if (!ep) {
3117 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
3118 err = -ENOMEM;
3119 goto fail1;
3120 }
3121 PDBG("%s ep %p\n", __func__, ep);
3122 cm_id->add_ref(cm_id);
3123 ep->com.cm_id = cm_id;
3124 ep->com.dev = dev;
3125 ep->backlog = backlog;
Steve Wise24d44a32013-07-04 16:10:44 +05303126 memcpy(&ep->com.local_addr, &cm_id->local_addr,
3127 sizeof(ep->com.local_addr));
Steve Wisecfdda9d2010-04-21 15:30:06 -07003128
3129 /*
3130 * Allocate a server TID.
3131 */
Kumar Sanghvi8c044692013-12-18 16:38:25 +05303132 if (dev->rdev.lldi.enable_fw_ofld_conn &&
3133 ep->com.local_addr.ss_family == AF_INET)
Vipul Pandya830662f2013-07-04 16:10:47 +05303134 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
3135 cm_id->local_addr.ss_family, ep);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003136 else
Vipul Pandya830662f2013-07-04 16:10:47 +05303137 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
3138 cm_id->local_addr.ss_family, ep);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003139
Steve Wisecfdda9d2010-04-21 15:30:06 -07003140 if (ep->stid == -1) {
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003141 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003142 err = -ENOMEM;
3143 goto fail2;
3144 }
Vipul Pandya793dad92012-12-10 09:30:56 +00003145 insert_handle(dev, &dev->stid_idr, ep, ep->stid);
Steve Wise9eccfe12014-03-26 17:08:09 -05003146
3147 /* No port mapper available, go with the specified info */
3148 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
3149 sizeof(ep->com.mapped_local_addr));
3150
3151 c4iw_form_reg_msg(dev, &pm_reg_msg);
3152 iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
3153 if (iwpm_err) {
3154 PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
3155 __func__, iwpm_err);
3156 }
3157 if (iwpm_valid_pid() && !iwpm_err) {
3158 memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
3159 sizeof(ep->com.local_addr));
3160 iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
3161 if (iwpm_err)
3162 PDBG("%s: Port Mapper query fail (err = %d).\n",
3163 __func__, iwpm_err);
3164 else
3165 memcpy(&ep->com.mapped_local_addr,
3166 &pm_msg.mapped_loc_addr,
3167 sizeof(ep->com.mapped_local_addr));
3168 }
3169 if (iwpm_create_mapinfo(&ep->com.local_addr,
3170 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
3171 err = -ENOMEM;
3172 goto fail3;
3173 }
3174 print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");
3175
3176 set_bit(RELEASE_MAPINFO, &ep->com.flags);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003177 state_set(&ep->com, LISTEN);
Vipul Pandya830662f2013-07-04 16:10:47 +05303178 if (ep->com.local_addr.ss_family == AF_INET)
3179 err = create_server4(dev, ep);
3180 else
3181 err = create_server6(dev, ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003182 if (!err) {
3183 cm_id->provider_data = ep;
3184 goto out;
3185 }
Steve Wise9eccfe12014-03-26 17:08:09 -05003186
3187fail3:
Vipul Pandya830662f2013-07-04 16:10:47 +05303188 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3189 ep->com.local_addr.ss_family);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003190fail2:
3191 cm_id->rem_ref(cm_id);
3192 c4iw_put_ep(&ep->com);
3193fail1:
3194out:
3195 return err;
3196}
3197
3198int c4iw_destroy_listen(struct iw_cm_id *cm_id)
3199{
3200 int err;
3201 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
3202
3203 PDBG("%s ep %p\n", __func__, ep);
3204
3205 might_sleep();
3206 state_set(&ep->com, DEAD);
Vipul Pandya830662f2013-07-04 16:10:47 +05303207 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
3208 ep->com.local_addr.ss_family == AF_INET) {
Vipul Pandya1cab7752012-12-10 09:30:55 +00003209 err = cxgb4_remove_server_filter(
3210 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3211 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
3212 } else {
3213 c4iw_init_wr_wait(&ep->com.wr_wait);
Vipul Pandya830662f2013-07-04 16:10:47 +05303214 err = cxgb4_remove_server(
3215 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3216 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003217 if (err)
3218 goto done;
3219 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
3220 0, 0, __func__);
3221 }
Vipul Pandya793dad92012-12-10 09:30:56 +00003222 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
Vipul Pandya830662f2013-07-04 16:10:47 +05303223 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3224 ep->com.local_addr.ss_family);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003225done:
Steve Wisecfdda9d2010-04-21 15:30:06 -07003226 cm_id->rem_ref(cm_id);
3227 c4iw_put_ep(&ep->com);
3228 return err;
3229}
3230
3231int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3232{
3233 int ret = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003234 int close = 0;
3235 int fatal = 0;
3236 struct c4iw_rdev *rdev;
Steve Wisecfdda9d2010-04-21 15:30:06 -07003237
Steve Wise2f5b48c2010-09-10 11:15:36 -05003238 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003239
3240 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
3241 states[ep->com.state], abrupt);
3242
3243 rdev = &ep->com.dev->rdev;
3244 if (c4iw_fatal_error(rdev)) {
3245 fatal = 1;
Steve Wisebe13b2d2014-03-21 20:40:33 +05303246 close_complete_upcall(ep, -EIO);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003247 ep->com.state = DEAD;
3248 }
3249 switch (ep->com.state) {
3250 case MPA_REQ_WAIT:
3251 case MPA_REQ_SENT:
3252 case MPA_REQ_RCVD:
3253 case MPA_REP_SENT:
3254 case FPDU_MODE:
3255 close = 1;
3256 if (abrupt)
3257 ep->com.state = ABORTING;
3258 else {
3259 ep->com.state = CLOSING;
Steve Wiseca5a2202010-07-23 19:12:37 +00003260 start_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003261 }
3262 set_bit(CLOSE_SENT, &ep->com.flags);
3263 break;
3264 case CLOSING:
3265 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3266 close = 1;
3267 if (abrupt) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003268 (void)stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003269 ep->com.state = ABORTING;
3270 } else
3271 ep->com.state = MORIBUND;
3272 }
3273 break;
3274 case MORIBUND:
3275 case ABORTING:
3276 case DEAD:
3277 PDBG("%s ignoring disconnect ep %p state %u\n",
3278 __func__, ep, ep->com.state);
3279 break;
3280 default:
3281 BUG();
3282 break;
3283 }
3284
Steve Wisecfdda9d2010-04-21 15:30:06 -07003285 if (close) {
Steve Wise8da7e7a2011-06-14 20:59:27 +00003286 if (abrupt) {
Vipul Pandya793dad92012-12-10 09:30:56 +00003287 set_bit(EP_DISC_ABORT, &ep->com.history);
Steve Wisebe13b2d2014-03-21 20:40:33 +05303288 close_complete_upcall(ep, -ECONNRESET);
Steve Wise8da7e7a2011-06-14 20:59:27 +00003289 ret = send_abort(ep, NULL, gfp);
Vipul Pandya793dad92012-12-10 09:30:56 +00003290 } else {
3291 set_bit(EP_DISC_CLOSE, &ep->com.history);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003292 ret = send_halfclose(ep, gfp);
Vipul Pandya793dad92012-12-10 09:30:56 +00003293 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07003294 if (ret)
3295 fatal = 1;
3296 }
Steve Wise8da7e7a2011-06-14 20:59:27 +00003297 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003298 if (fatal)
3299 release_ep_resources(ep);
3300 return ret;
3301}
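/*
 * Not driver code: the decision c4iw_ep_disconnect() makes, reduced to a
 * pure function so the state machine is easier to read.  EP_ESTABLISHING
 * collapses the MPA request/reply and FPDU_MODE states that the real
 * switch lists individually; all names here are hypothetical.
 */
enum ep_state_sketch { EP_ESTABLISHING, EP_CLOSING, EP_MORIBUND,
		       EP_ABORTING, EP_DEAD };
enum ep_action { EP_DO_NOTHING, EP_SEND_HALF_CLOSE, EP_SEND_ABORT };

static enum ep_action disconnect_action(enum ep_state_sketch state,
					int abrupt, int close_already_sent)
{
	switch (state) {
	case EP_ESTABLISHING:
		return abrupt ? EP_SEND_ABORT : EP_SEND_HALF_CLOSE;
	case EP_CLOSING:
		/* only one close may be issued per endpoint */
		if (close_already_sent)
			return EP_DO_NOTHING;
		return abrupt ? EP_SEND_ABORT : EP_SEND_HALF_CLOSE;
	case EP_MORIBUND:
	case EP_ABORTING:
	case EP_DEAD:
	default:
		return EP_DO_NOTHING;
	}
}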
3302
Vipul Pandya1cab7752012-12-10 09:30:55 +00003303static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3304 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3305{
3306 struct c4iw_ep *ep;
Vipul Pandya793dad92012-12-10 09:30:56 +00003307 int atid = be32_to_cpu(req->tid);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003308
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003309 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3310 (__force u32) req->tid);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003311 if (!ep)
3312 return;
3313
3314 switch (req->retval) {
3315 case FW_ENOMEM:
Vipul Pandya793dad92012-12-10 09:30:56 +00003316 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3317 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3318 send_fw_act_open_req(ep, atid);
3319 return;
3320 }
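		/* no break: falls through once the retry budget is exhausted */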
Vipul Pandya1cab7752012-12-10 09:30:55 +00003321 case FW_EADDRINUSE:
Vipul Pandya793dad92012-12-10 09:30:56 +00003322 set_bit(ACT_RETRY_INUSE, &ep->com.history);
3323 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3324 send_fw_act_open_req(ep, atid);
3325 return;
3326 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00003327 break;
3328 default:
3329 pr_info("%s unexpected ofld conn wr retval %d\n",
3330 __func__, req->retval);
3331 break;
3332 }
Vipul Pandya793dad92012-12-10 09:30:56 +00003333 pr_err("active ofld_connect_wr failure %d atid %d\n",
3334 req->retval, atid);
3335 mutex_lock(&dev->rdev.stats.lock);
3336 dev->rdev.stats.act_ofld_conn_fails++;
3337 mutex_unlock(&dev->rdev.stats.lock);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003338 connect_reply_upcall(ep, status2errno(req->retval));
Vipul Pandya793dad92012-12-10 09:30:56 +00003339 state_set(&ep->com, DEAD);
3340 remove_handle(dev, &dev->atid_idr, atid);
3341 cxgb4_free_atid(dev->rdev.lldi.tids, atid);
3342 dst_release(ep->dst);
3343 cxgb4_l2t_release(ep->l2t);
3344 c4iw_put_ep(&ep->com);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003345}
3346
3347static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3348 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3349{
3350 struct sk_buff *rpl_skb;
3351 struct cpl_pass_accept_req *cpl;
3352 int ret;
3353
Paul Bolle710a3112013-02-05 20:51:30 +00003354 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003355 BUG_ON(!rpl_skb);
3356 if (req->retval) {
3357 PDBG("%s passive open failure %d\n", __func__, req->retval);
Vipul Pandya793dad92012-12-10 09:30:56 +00003358 mutex_lock(&dev->rdev.stats.lock);
3359 dev->rdev.stats.pas_ofld_conn_fails++;
3360 mutex_unlock(&dev->rdev.stats.lock);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003361 kfree_skb(rpl_skb);
3362 } else {
3363 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
3364 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003365 (__force u32) htonl(
3366 (__force u32) req->tid)));
Vipul Pandya1cab7752012-12-10 09:30:55 +00003367 ret = pass_accept_req(dev, rpl_skb);
3368 if (!ret)
3369 kfree_skb(rpl_skb);
3370 }
3371 return;
3372}
3373
3374static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
Steve Wise2f5b48c2010-09-10 11:15:36 -05003375{
3376 struct cpl_fw6_msg *rpl = cplhdr(skb);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003377 struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
3378
3379 switch (rpl->type) {
3380 case FW6_TYPE_CQE:
3381 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
3382 break;
3383 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3384 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
3385 switch (req->t_state) {
3386 case TCP_SYN_SENT:
3387 active_ofld_conn_reply(dev, skb, req);
3388 break;
3389 case TCP_SYN_RECV:
3390 passive_ofld_conn_reply(dev, skb, req);
3391 break;
3392 default:
3393 pr_err("%s unexpected ofld conn wr state %d\n",
3394 __func__, req->t_state);
3395 break;
3396 }
3397 break;
3398 }
3399 return 0;
3400}
3401
3402static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
3403{
3404 u32 l2info;
Vipul Pandyaf079af72013-03-14 05:08:58 +00003405 u16 vlantag, len, hdr_len, eth_hdr_len;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003406 u8 intf;
3407 struct cpl_rx_pkt *cpl = cplhdr(skb);
3408 struct cpl_pass_accept_req *req;
3409 struct tcp_options_received tmp_opt;
Vipul Pandyaf079af72013-03-14 05:08:58 +00003410 struct c4iw_dev *dev;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003411
Vipul Pandyaf079af72013-03-14 05:08:58 +00003412 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
Vipul Pandya1cab7752012-12-10 09:30:55 +00003413 /* Store values from cpl_rx_pkt in temporary location. */
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003414 vlantag = (__force u16) cpl->vlan;
3415 len = (__force u16) cpl->len;
3416 l2info = (__force u32) cpl->l2info;
3417 hdr_len = (__force u16) cpl->hdr_len;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003418 intf = cpl->iff;
3419
3420 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
3421
3422 /*
3423	 * We need to parse the TCP options from the SYN packet
3424	 * to generate a cpl_pass_accept_req.
3425 */
3426 memset(&tmp_opt, 0, sizeof(tmp_opt));
3427 tcp_clear_options(&tmp_opt);
Christoph Paasch1a2c6182013-03-17 08:23:34 +00003428 tcp_parse_options(skb, &tmp_opt, 0, NULL);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003429
3430 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
3431 memset(req, 0, sizeof(*req));
3432 req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003433 V_SYN_MAC_IDX(G_RX_MACIDX(
3434 (__force int) htonl(l2info))) |
Vipul Pandya1cab7752012-12-10 09:30:55 +00003435 F_SYN_XACT_MATCH);
Vipul Pandyaf079af72013-03-14 05:08:58 +00003436 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
3437 G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
3438 G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003439 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
3440 (__force int) htonl(l2info))) |
3441 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
3442 (__force int) htons(hdr_len))) |
3443 V_IP_HDR_LEN(G_RX_IPHDR_LEN(
3444 (__force int) htons(hdr_len))) |
Vipul Pandyaf079af72013-03-14 05:08:58 +00003445 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003446 req->vlan = (__force __be16) vlantag;
3447 req->len = (__force __be16) len;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003448 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
3449 PASS_OPEN_TOS(tos));
3450 req->tcpopt.mss = htons(tmp_opt.mss_clamp);
3451 if (tmp_opt.wscale_ok)
3452 req->tcpopt.wsf = tmp_opt.snd_wscale;
3453 req->tcpopt.tstamp = tmp_opt.saw_tstamp;
3454 if (tmp_opt.sack_ok)
3455 req->tcpopt.sack = 1;
3456 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
3457 return;
3458}
3459
3460static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3461 __be32 laddr, __be16 lport,
3462 __be32 raddr, __be16 rport,
3463 u32 rcv_isn, u32 filter, u16 window,
3464 u32 rss_qid, u8 port_id)
3465{
3466 struct sk_buff *req_skb;
3467 struct fw_ofld_connection_wr *req;
3468 struct cpl_pass_accept_req *cpl = cplhdr(skb);
Steve Wise1ce1d472014-03-21 20:40:31 +05303469 int ret;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003470
3471 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
3472 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
3473 memset(req, 0, sizeof(*req));
3474 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
3475 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
3476 req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003477 req->le.filter = (__force __be32) filter;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003478 req->le.lport = lport;
3479 req->le.pport = rport;
3480 req->le.u.ipv4.lip = laddr;
3481 req->le.u.ipv4.pip = raddr;
3482 req->tcb.rcv_nxt = htonl(rcv_isn + 1);
3483 req->tcb.rcv_adv = htons(window);
3484 req->tcb.t_state_to_astid =
3485 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
3486 V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
3487 V_FW_OFLD_CONNECTION_WR_ASTID(
3488 GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
3489
3490 /*
3491 * We store the qid in opt2 which will be used by the firmware
3492 * to send us the wr response.
3493 */
3494 req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
3495
3496 /*
3497	 * We initialize the MSS index in the TCB to 0xF so that when the
3498	 * driver sends the cpl_pass_accept_rpl, the TCB picks up the
3499	 * correct value. If this were 0, TP would ignore any value > 0
3500	 * for the MSS index.
3501 */
3502 req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
Paul Bolle710a3112013-02-05 20:51:30 +00003503 req->cookie = (unsigned long)skb;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003504
3505 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
Steve Wise1ce1d472014-03-21 20:40:31 +05303506 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
3507 if (ret < 0) {
3508 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
3509 ret);
3510 kfree_skb(skb);
3511 kfree_skb(req_skb);
3512 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00003513}
3514
3515/*
3516 * Handler for CPL_RX_PKT messages. These arrive when a filter,
3517 * rather than a server TID, is used to redirect a SYN packet.
3518 * Packets that hit the filter are redirected to the offload queue,
3519 * and the driver then tries to establish the connection using a
3520 * firmware work request.
3521 */
3522static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3523{
3524 int stid;
3525 unsigned int filter;
3526 struct ethhdr *eh = NULL;
3527 struct vlan_ethhdr *vlan_eh = NULL;
3528 struct iphdr *iph;
3529 struct tcphdr *tcph;
3530 struct rss_header *rss = (void *)skb->data;
3531 struct cpl_rx_pkt *cpl = (void *)skb->data;
3532 struct cpl_pass_accept_req *req = (void *)(rss + 1);
3533 struct l2t_entry *e;
3534 struct dst_entry *dst;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003535 struct c4iw_ep *lep;
3536 u16 window;
3537 struct port_info *pi;
3538 struct net_device *pdev;
Vipul Pandyaf079af72013-03-14 05:08:58 +00003539 u16 rss_qid, eth_hdr_len;
Vipul Pandya1cab7752012-12-10 09:30:55 +00003540 int step;
3541 u32 tx_chan;
3542 struct neighbour *neigh;
3543
3544 /* Drop all non-SYN packets */
3545 if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
3546 goto reject;
3547
3548 /*
3549 * Drop all packets which did not hit the filter.
3550 * Unlikely to happen.
3551 */
3552 if (!(rss->filter_hit && rss->filter_tid))
3553 goto reject;
3554
3555 /*
3556	 * Calculate the server TID from the filter hit index in the cpl_rx_pkt.
3557 */
Kumar Sanghvia4ea0252013-12-18 16:38:24 +05303558 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003559
3560 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
3561 if (!lep) {
3562 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
3563 goto reject;
3564 }
3565
Vipul Pandyaf079af72013-03-14 05:08:58 +00003566 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
3567 G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
3568 G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
3569 if (eth_hdr_len == ETH_HLEN) {
Vipul Pandya1cab7752012-12-10 09:30:55 +00003570 eh = (struct ethhdr *)(req + 1);
3571 iph = (struct iphdr *)(eh + 1);
3572 } else {
3573 vlan_eh = (struct vlan_ethhdr *)(req + 1);
3574 iph = (struct iphdr *)(vlan_eh + 1);
3575 skb->vlan_tci = ntohs(cpl->vlan);
3576 }
3577
3578 if (iph->version != 0x4)
3579 goto reject;
3580
3581 tcph = (struct tcphdr *)(iph + 1);
3582 skb_set_network_header(skb, (void *)iph - (void *)rss);
3583 skb_set_transport_header(skb, (void *)tcph - (void *)rss);
3584 skb_get(skb);
3585
3586 PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
3587 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
3588 ntohs(tcph->source), iph->tos);
3589
Vipul Pandya830662f2013-07-04 16:10:47 +05303590 dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
3591 iph->tos);
3592 if (!dst) {
Vipul Pandya1cab7752012-12-10 09:30:55 +00003593 pr_err("%s - failed to find dst entry!\n",
3594 __func__);
3595 goto reject;
3596 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00003597 neigh = dst_neigh_lookup_skb(dst, skb);
3598
Zhouyi Zhouaaa0c232013-03-14 17:21:50 +00003599 if (!neigh) {
3600 pr_err("%s - failed to allocate neigh!\n",
3601 __func__);
3602 goto free_dst;
3603 }
3604
Vipul Pandya1cab7752012-12-10 09:30:55 +00003605 if (neigh->dev->flags & IFF_LOOPBACK) {
3606 pdev = ip_dev_find(&init_net, iph->daddr);
3607 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3608 pdev, 0);
3609 pi = (struct port_info *)netdev_priv(pdev);
3610 tx_chan = cxgb4_port_chan(pdev);
3611 dev_put(pdev);
3612 } else {
Vipul Pandya830662f2013-07-04 16:10:47 +05303613 pdev = get_real_dev(neigh->dev);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003614 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
Vipul Pandya830662f2013-07-04 16:10:47 +05303615 pdev, 0);
3616 pi = (struct port_info *)netdev_priv(pdev);
3617 tx_chan = cxgb4_port_chan(pdev);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003618 }
Steve Wiseebf00062014-03-19 17:44:40 +05303619 neigh_release(neigh);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003620 if (!e) {
3621 pr_err("%s - failed to allocate l2t entry!\n",
3622 __func__);
3623 goto free_dst;
3624 }
3625
3626 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
3627 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003628 window = (__force u16) htons((__force u16)tcph->window);
Vipul Pandya1cab7752012-12-10 09:30:55 +00003629
3630	/* Calculate the filter portion for the LE region. */
Kumar Sanghvi41b4f862013-12-18 16:38:26 +05303631 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
3632 dev->rdev.lldi.ports[0],
3633 e));
Vipul Pandya1cab7752012-12-10 09:30:55 +00003634
3635 /*
3636 * Synthesize the cpl_pass_accept_req. We have everything except the
3637 * TID. Once firmware sends a reply with TID we update the TID field
3638 * in cpl and pass it through the regular cpl_pass_accept_req path.
3639 */
3640 build_cpl_pass_accept_req(skb, stid, iph->tos);
3641 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
3642 tcph->source, ntohl(tcph->seq), filter, window,
3643 rss_qid, pi->port_id);
3644 cxgb4_l2t_release(e);
3645free_dst:
3646 dst_release(dst);
3647reject:
Steve Wise2f5b48c2010-09-10 11:15:36 -05003648 return 0;
3649}
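/*
 * Illustrative only: the pointer arithmetic rx_pkt() uses to walk from the
 * Ethernet header to the IP and TCP headers of the redirected SYN, shown
 * on a raw frame in userspace.  Assumes an untagged frame; rx_pkt() itself
 * takes the TCP header as immediately following the fixed IP header,
 * whereas this sketch honours ihl so IP options are skipped too.
 */
#include <net/ethernet.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <stddef.h>

static const struct tcphdr *syn_to_tcp(const void *frame, size_t len)
{
	const struct ether_header *eh = frame;
	const struct iphdr *iph;
	size_t ip_off = sizeof(*eh);

	if (len < ip_off + sizeof(*iph))
		return NULL;
	iph = (const struct iphdr *)(eh + 1);
	if (iph->version != 4)
		return NULL;
	/* ihl is in 32-bit words; IP options shift the TCP header. */
	if (len < ip_off + iph->ihl * 4 + sizeof(struct tcphdr))
		return NULL;
	return (const struct tcphdr *)((const unsigned char *)iph +
				       iph->ihl * 4);
}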
3650
Steve Wisecfdda9d2010-04-21 15:30:06 -07003651/*
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003652 * These are the real handlers that are called from a
3653 * work queue.
3654 */
3655static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
3656 [CPL_ACT_ESTABLISH] = act_establish,
3657 [CPL_ACT_OPEN_RPL] = act_open_rpl,
3658 [CPL_RX_DATA] = rx_data,
3659 [CPL_ABORT_RPL_RSS] = abort_rpl,
3660 [CPL_ABORT_RPL] = abort_rpl,
3661 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
3662 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
3663 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
3664 [CPL_PASS_ESTABLISH] = pass_establish,
3665 [CPL_PEER_CLOSE] = peer_close,
3666 [CPL_ABORT_REQ_RSS] = peer_abort,
3667 [CPL_CLOSE_CON_RPL] = close_con_rpl,
3668 [CPL_RDMA_TERMINATE] = terminate,
Steve Wise2f5b48c2010-09-10 11:15:36 -05003669 [CPL_FW4_ACK] = fw4_ack,
Vipul Pandya1cab7752012-12-10 09:30:55 +00003670 [CPL_FW6_MSG] = deferred_fw6_msg,
3671 [CPL_RX_PKT] = rx_pkt
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003672};
3673
3674static void process_timeout(struct c4iw_ep *ep)
3675{
3676 struct c4iw_qp_attributes attrs;
3677 int abort = 1;
3678
Steve Wise2f5b48c2010-09-10 11:15:36 -05003679 mutex_lock(&ep->com.mutex);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003680 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
3681 ep->com.state);
Vipul Pandya793dad92012-12-10 09:30:56 +00003682 set_bit(TIMEDOUT, &ep->com.history);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003683 switch (ep->com.state) {
3684 case MPA_REQ_SENT:
3685 __state_set(&ep->com, ABORTING);
3686 connect_reply_upcall(ep, -ETIMEDOUT);
3687 break;
3688 case MPA_REQ_WAIT:
3689 __state_set(&ep->com, ABORTING);
3690 break;
3691 case CLOSING:
3692 case MORIBUND:
3693 if (ep->com.cm_id && ep->com.qp) {
3694 attrs.next_state = C4IW_QP_STATE_ERROR;
3695 c4iw_modify_qp(ep->com.qp->rhp,
3696 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
3697 &attrs, 1);
3698 }
3699 __state_set(&ep->com, ABORTING);
Steve Wisebe13b2d2014-03-21 20:40:33 +05303700 close_complete_upcall(ep, -ETIMEDOUT);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003701 break;
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003702 case ABORTING:
3703 case DEAD:
3704
3705 /*
3706 * These states are expected if the ep timed out at the same
3707 * time as another thread was calling stop_ep_timer().
3708 * So we silently do nothing for these states.
3709 */
3710 abort = 0;
3711 break;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003712 default:
Julia Lawall76f267b2012-11-03 10:58:27 +00003713 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003714 __func__, ep, ep->hwtid, ep->com.state);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003715 abort = 0;
3716 }
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003717 if (abort)
3718 abort_connection(ep, NULL, GFP_KERNEL);
Steve Wisecc18b932014-04-24 14:31:53 -05003719 mutex_unlock(&ep->com.mutex);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003720 c4iw_put_ep(&ep->com);
3721}
3722
3723static void process_timedout_eps(void)
3724{
3725 struct c4iw_ep *ep;
3726
3727 spin_lock_irq(&timeout_lock);
3728 while (!list_empty(&timeout_list)) {
3729 struct list_head *tmp;
3730
3731 tmp = timeout_list.next;
3732 list_del(tmp);
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003733 tmp->next = NULL;
3734 tmp->prev = NULL;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003735 spin_unlock_irq(&timeout_lock);
3736 ep = list_entry(tmp, struct c4iw_ep, entry);
3737 process_timeout(ep);
3738 spin_lock_irq(&timeout_lock);
3739 }
3740 spin_unlock_irq(&timeout_lock);
3741}
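/*
 * Not driver code: the drop-the-lock-while-processing idiom used by
 * process_timedout_eps(), shown with pthreads.  Entries are popped under
 * the lock, but the per-entry work runs with the lock released so the
 * handler may sleep or take other locks.
 */
#include <pthread.h>
#include <stddef.h>

struct item { struct item *next; };

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *q_head;

static void handle(struct item *it) { (void)it; /* may sleep */ }

static void drain_queue(void)
{
	pthread_mutex_lock(&q_lock);
	while (q_head) {
		struct item *it = q_head;

		q_head = it->next;
		pthread_mutex_unlock(&q_lock);
		handle(it);			/* lock not held here */
		pthread_mutex_lock(&q_lock);
	}
	pthread_mutex_unlock(&q_lock);
}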
3742
3743static void process_work(struct work_struct *work)
3744{
3745 struct sk_buff *skb = NULL;
3746 struct c4iw_dev *dev;
Dan Carpenterc1d73562010-05-31 14:00:53 +00003747 struct cpl_act_establish *rpl;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003748 unsigned int opcode;
3749 int ret;
3750
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003751 process_timedout_eps();
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003752 while ((skb = skb_dequeue(&rxq))) {
3753 rpl = cplhdr(skb);
3754 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
3755 opcode = rpl->ot.opcode;
3756
3757 BUG_ON(!work_handlers[opcode]);
3758 ret = work_handlers[opcode](dev, skb);
3759 if (!ret)
3760 kfree_skb(skb);
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003761 process_timedout_eps();
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003762 }
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003763}
3764
3765static DECLARE_WORK(skb_work, process_work);
3766
3767static void ep_timeout(unsigned long arg)
3768{
3769 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
Vipul Pandya1ec779c2013-01-07 13:11:56 +00003770 int kickit = 0;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003771
3772 spin_lock(&timeout_lock);
Vipul Pandya1ec779c2013-01-07 13:11:56 +00003773 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
Steve Wiseb33bd0c2014-04-09 09:38:25 -05003774 /*
3775 * Only insert if it is not already on the list.
3776 */
3777 if (!ep->entry.next) {
3778 list_add_tail(&ep->entry, &timeout_list);
3779 kickit = 1;
3780 }
Vipul Pandya1ec779c2013-01-07 13:11:56 +00003781 }
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003782 spin_unlock(&timeout_lock);
Vipul Pandya1ec779c2013-01-07 13:11:56 +00003783 if (kickit)
3784 queue_work(workq, &skb_work);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003785}
3786
3787/*
Steve Wisecfdda9d2010-04-21 15:30:06 -07003788 * All the CM events are handled on a work queue to have a safe context.
3789 */
3790static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
3791{
3792
3793 /*
3794 * Save dev in the skb->cb area.
3795 */
3796 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
3797
3798 /*
3799 * Queue the skb and schedule the worker thread.
3800 */
3801 skb_queue_tail(&rxq, skb);
3802 queue_work(workq, &skb_work);
3803 return 0;
3804}
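/*
 * Illustrative only: how sched() hands the c4iw_dev pointer to the work
 * queue - it is stashed in the skb's per-packet control buffer, one
 * pointer-width in, and read back by process_work().  fake_skb is a
 * stand-in for sk_buff; memcpy() is used here instead of the driver's
 * direct cast for alignment safety.
 */
#include <string.h>

struct fake_skb {
	unsigned char cb[48];	/* stands in for the skb control buffer */
};

static void stash_dev(struct fake_skb *skb, void *dev)
{
	/* same layout as the driver: skip one pointer, then store dev */
	memcpy(skb->cb + sizeof(void *), &dev, sizeof(dev));
}

static void *fetch_dev(const struct fake_skb *skb)
{
	void *dev;

	memcpy(&dev, skb->cb + sizeof(void *), sizeof(dev));
	return dev;
}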
3805
3806static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
3807{
3808 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
3809
3810 if (rpl->status != CPL_ERR_NONE) {
3811 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
3812 "for tid %u\n", rpl->status, GET_TID(rpl));
3813 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05003814 kfree_skb(skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003815 return 0;
3816}
3817
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003818static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
3819{
3820 struct cpl_fw6_msg *rpl = cplhdr(skb);
3821 struct c4iw_wr_wait *wr_waitp;
3822 int ret;
3823
3824 PDBG("%s type %u\n", __func__, rpl->type);
3825
3826 switch (rpl->type) {
Vipul Pandya5be78ee2012-12-10 09:30:54 +00003827 case FW6_TYPE_WR_RPL:
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003828 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
Roland Dreierc8e081a2010-09-27 17:51:04 -07003829 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003830 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
Steve Wised9594d92011-05-09 22:06:22 -07003831 if (wr_waitp)
3832 c4iw_wake_up(wr_waitp, ret ? -ret : 0);
Steve Wise2f5b48c2010-09-10 11:15:36 -05003833 kfree_skb(skb);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003834 break;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00003835 case FW6_TYPE_CQE:
Vipul Pandya5be78ee2012-12-10 09:30:54 +00003836 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
Vipul Pandya1cab7752012-12-10 09:30:55 +00003837 sched(dev, skb);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00003838 break;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003839 default:
3840 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
3841 rpl->type);
Steve Wise2f5b48c2010-09-10 11:15:36 -05003842 kfree_skb(skb);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003843 break;
3844 }
3845 return 0;
3846}
3847
Steve Wise8da7e7a2011-06-14 20:59:27 +00003848static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
3849{
3850 struct cpl_abort_req_rss *req = cplhdr(skb);
3851 struct c4iw_ep *ep;
3852 struct tid_info *t = dev->rdev.lldi.tids;
3853 unsigned int tid = GET_TID(req);
3854
3855 ep = lookup_tid(t, tid);
Steve Wise14b92222012-04-30 15:31:29 -05003856 if (!ep) {
3857 printk(KERN_WARNING MOD
3858 "Abort on non-existent endpoint, tid %d\n", tid);
3859 kfree_skb(skb);
3860 return 0;
3861 }
Steve Wise7a2cea22014-03-14 21:52:07 +05303862 if (is_neg_adv(req->status)) {
Steve Wise8da7e7a2011-06-14 20:59:27 +00003863 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
3864 ep->hwtid);
3865 kfree_skb(skb);
3866 return 0;
3867 }
3868 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
3869 ep->com.state);
3870
3871 /*
3872 * Wake up any threads in rdma_init() or rdma_fini().
Vipul Pandya7c0a33d2013-01-07 13:11:58 +00003873 * However, if we are on MPAv2 and want to retry with MPAv1
3874	 * then don't wake up yet.
Steve Wise8da7e7a2011-06-14 20:59:27 +00003875 */
Vipul Pandya7c0a33d2013-01-07 13:11:58 +00003876 if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
3877 if (ep->com.state != MPA_REQ_SENT)
3878 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
3879 } else
3880 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wise8da7e7a2011-06-14 20:59:27 +00003881 sched(dev, skb);
3882 return 0;
3883}
3884
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003885/*
3886 * Most upcalls from the T4 Core go to sched() to
3887 * schedule the processing on a work queue.
3888 */
3889c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
3890 [CPL_ACT_ESTABLISH] = sched,
3891 [CPL_ACT_OPEN_RPL] = sched,
3892 [CPL_RX_DATA] = sched,
3893 [CPL_ABORT_RPL_RSS] = sched,
3894 [CPL_ABORT_RPL] = sched,
3895 [CPL_PASS_OPEN_RPL] = sched,
3896 [CPL_CLOSE_LISTSRV_RPL] = sched,
3897 [CPL_PASS_ACCEPT_REQ] = sched,
3898 [CPL_PASS_ESTABLISH] = sched,
3899 [CPL_PEER_CLOSE] = sched,
3900 [CPL_CLOSE_CON_RPL] = sched,
Steve Wise8da7e7a2011-06-14 20:59:27 +00003901 [CPL_ABORT_REQ_RSS] = peer_abort_intr,
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003902 [CPL_RDMA_TERMINATE] = sched,
3903 [CPL_FW4_ACK] = sched,
3904 [CPL_SET_TCB_RPL] = set_tcb_rpl,
Vipul Pandya1cab7752012-12-10 09:30:55 +00003905 [CPL_FW6_MSG] = fw6_msg,
3906 [CPL_RX_PKT] = sched
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003907};
3908
Steve Wisecfdda9d2010-04-21 15:30:06 -07003909int __init c4iw_cm_init(void)
3910{
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003911 spin_lock_init(&timeout_lock);
Steve Wisecfdda9d2010-04-21 15:30:06 -07003912 skb_queue_head_init(&rxq);
3913
3914 workq = create_singlethread_workqueue("iw_cxgb4");
3915 if (!workq)
3916 return -ENOMEM;
3917
Steve Wisecfdda9d2010-04-21 15:30:06 -07003918 return 0;
3919}
3920
3921void __exit c4iw_cm_term(void)
3922{
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003923 WARN_ON(!list_empty(&timeout_list));
Steve Wisecfdda9d2010-04-21 15:30:06 -07003924 flush_workqueue(workq);
3925 destroy_workqueue(workq);
3926}