/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int ep_timeout_secs = 10;
module_param(ep_timeout_secs, int, 0444);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
		 "in seconds (default=10)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0444);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0444);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0444);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0444);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0444);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0444);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0444);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);

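/*
 * Arm the endpoint timer. The EP holds an extra reference while the
 * timer is pending so it cannot be freed before ep_timeout() runs;
 * stop_ep_timer() drops that reference again.
 */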
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}

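/*
 * Post a CPL_TID_RELEASE to hand a hardware TID back to the adapter,
 * reusing the supplied skb when possible.
 */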
static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	tdev->send(tdev, skb);
	return;
}

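/*
 * iwch_quiesce_tid()/iwch_resume_tid() toggle the RX_QUIESCE bit in the
 * hardware TCB via CPL_SET_TCB_FIELD, pausing and resuming ingress TCP
 * processing for this connection.
 */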
int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

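/*
 * Derive the effective MSS from the negotiated TCP options: start from
 * the MTU-table entry, subtract 40 bytes of IP/TCP header, 12 more if
 * timestamps are in use, and never go below 128 bytes.
 */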
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kmalloc(size, gfp);
	if (epc) {
		memset(epc, 0, size);
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep_common *epc;
	epc = container_of(kref, struct iwch_ep_common, kref);
	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
	kfree(epc);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, ep->hwtid, NULL);
	put_ep(&ep->com);
}

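/*
 * Work-queue handler: drain the global rxq and dispatch each deferred
 * CPL message to its handler in process context. The EP pointer and
 * t3cdev were stashed in skb->cb by sched().
 */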
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(dev, skb);
}

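/*
 * Send a CPL_CLOSE_CON_REQ to start an orderly (half) close of the TCP
 * connection. send_abort() below is the abortive counterpart: it sends
 * a CPL_ABORT_REQ asking the hardware to RST the peer.
 */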
static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

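/*
 * Build and send the CPL_ACT_OPEN_REQ that initiates an active TCP open,
 * encoding the L2T entry, MTU index, window scale and congestion-control
 * options for the hardware.
 */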
static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

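/*
 * Stream the MPA start request (key, flags, revision and any private
 * data) over the established TCP connection as an offload TX_DATA work
 * request, then arm the MPA timer and move to MPA_REQ_SENT.
 */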
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

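/*
 * Active open established: record the hardware TID and initial sequence
 * numbers, free the ATID, and kick off MPA negotiation.
 */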
static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}

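/*
 * The *_upcall() helpers below translate hardware/TCP events into
 * iw_cm_event notifications delivered through the iWARP CM's
 * event_handler callback.
 */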
static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD)
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

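/*
 * Return RX credits to the hardware with a forced ACK so the TCP
 * receive window reopens as the host consumes streaming-mode data.
 */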
static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	ep->com.tdev->send(ep->com.tdev, skb);
	return credits;
}

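/*
 * Accumulate and validate the peer's MPA start reply (active side).
 * Once the full message, including private data, has arrived and
 * checks out, move the QP to RTS and deliver the connect reply upcall.
 */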
static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (!err)
		goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

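/*
 * Passive-side counterpart: accumulate and validate the peer's MPA
 * start request, then deliver a connect-request upcall to the listener.
 */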
static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

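/*
 * CPL_RX_DATA handler: streaming-mode TCP payload. Before FPDU mode is
 * reached this carries MPA negotiation bytes; dispatch on the current
 * endpoint state and then return the consumed bytes as RX credits.
 */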
static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);

	if (credits == 0)
		return CPL_RET_BUF_DONE;
	BUG_ON(credits != 1);
	BUG_ON(ep->mpa_skb == NULL);
	kfree_skb(ep->mpa_skb);
	ep->mpa_skb = NULL;
	dst_confirm(ep->dst);
	if (state_read(&ep->com) == MPA_REP_SENT) {
		ep->com.rpl_done = 1;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
	}
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * We get 2 abort replies from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
		ep->flags |= ABORT_REQ_IN_PROGRESS;
		return CPL_RET_BUF_DONE;
	}

	close_complete_upcall(ep);
	state_set(&ep->com, DEAD);
	release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}

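/*
 * Ask the hardware to start listening: send a CPL_PASS_OPEN_REQ for the
 * server TID, with connection policy "ask" so each SYN is passed up to
 * the driver for an accept/reject decision.
 */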
static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}

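/*
 * Accept an incoming connection request by reusing the request skb to
 * send a CPL_PASS_ACCEPT_RPL carrying the negotiated TCP options.
 */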
static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type == T3B)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		tdev->send(tdev, skb);
	}
}

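/*
 * CPL_PASS_ACCEPT_REQ handler: a SYN arrived on a listening server TID.
 * Resolve the ingress netdev and route, allocate an L2T entry and a
 * child endpoint, then accept or reject the connection request.
 */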
static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __FUNCTION__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR
		       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
		       __FUNCTION__,
		       req->dst_mac[0],
		       req->dst_mac[1],
		       req->dst_mac[2],
		       req->dst_mac[3],
		       req->dst_mac[4],
		       req->dst_mac[5]);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __FUNCTION__);
		goto reject;
	}
	dst = &rt->u.dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __FUNCTION__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __FUNCTION__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}

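/*
 * CPL_PEER_CLOSE handler: the peer sent a FIN. What happens next
 * depends on where the endpoint is in its life cycle, so the state
 * switch below drives the close/disconnect/release decisions.
 */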
static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		__state_set(&ep->com, CLOSING);
		get_ep(&ep->com);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

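/*
 * CPL_ABORT_REQ_RSS handler: the connection is being torn down. The HW
 * delivers two aborts, so the first is only noted; negative advice just
 * pokes the L2T entry. Otherwise clean up according to the current
 * state and send a CPL_ABORT_RPL with no RST.
 */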
static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int state;

	/*
	 * We get 2 peer aborts from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
		ep->flags |= PEER_ABORT_IN_PROGRESS;
		return CPL_RET_BUF_DONE;
	}

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	state = state_read(&ep->com);
	PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
	switch (state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		get_ep(&ep->com);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __FUNCTION__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __FUNCTION__);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
		put_ep(&ep->com);
		return CPL_RET_BUF_DONE;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	ep->com.tdev->send(ep->com.tdev, rpl_skb);
	if (state != ABORTING) {
		state_set(&ep->com, DEAD);
		release_ep_resources(ep);
	}
	return CPL_RET_BUF_DONE;
}

static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
		break;
	case DEAD:
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumer consumption.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}

static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __FUNCTION__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}

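/*
 * MPA/close timer expired: fail the operation appropriate to the
 * current state, then abort the connection. The EP reference taken in
 * start_ep_timer() is dropped here.
 */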
static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		break;
	default:
		BUG();
	}
	__state_set(&ep->com, CLOSING);
	spin_unlock_irqrestore(&ep->com.lock, flags);
	abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}

int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	return 0;
}

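/*
 * ULP accepts a connection request: validate IRD/ORD against device
 * limits, bind the QP to the endpoint, move it to RTS, send the MPA
 * reply and wait for the hardware to ack it before declaring the
 * connection established.
 */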
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD)
		return -ECONNRESET;

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		return -EINVAL;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);

	get_ep(&ep->com);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err;

	/* wait for wr_ack */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (err)
		goto err;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
err:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
	abort_connection(ep, NULL, GFP_KERNEL);
	put_ep(&ep->com);
	return err;
}

int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get an l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);	/* drop the reference taken above */
	put_ep(&ep->com);
out:
	return err;
}

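/*
 * iwch_create_listen() - passive-open side: start listening for
 * incoming iWARP connection requests.
 *
 * Allocates a listening endpoint and a server TID, then blocks until
 * the adapter acknowledges the listen with a PASS_OPEN_RPL.
 */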
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	cm_id->rem_ref(cm_id);	/* drop the reference taken above */
	put_ep(&ep->com);
fail1:
out:
	return err;
}

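/*
 * iwch_destroy_listen() - tear down a listening endpoint.
 *
 * Marks the endpoint DEAD, asks the adapter to stop the listener, and
 * waits for the CLOSE_LISTSRV_RPL before releasing the server TID.
 */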
int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
	if (!err)
		err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}

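/*
 * iwch_ep_disconnect() - begin an orderly or abortive close.
 *
 * With @abrupt set the connection is reset immediately; otherwise the
 * endpoint advances through the half-close state machine (CLOSING,
 * then MORIBUND once both halves are closed).  Returns the result of
 * the ABORT or half-close send, or 0 if no send was needed.
 */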
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep,
	     states[ep->com.state], abrupt);

	if (ep->com.state == DEAD) {
		PDBG("%s already dead ep %p\n", __FUNCTION__, ep);
		goto out;
	}

	if (abrupt) {
		if (ep->com.state != ABORTING) {
			ep->com.state = ABORTING;
			close = 1;
		}
		goto out;
	}

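	/* Graceful close: advance the connection's half-close state machine. */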
1960
1961 switch (ep->com.state) {
1962 case MPA_REQ_WAIT:
1963 case MPA_REQ_SENT:
1964 case MPA_REQ_RCVD:
1965 case MPA_REP_SENT:
1966 case FPDU_MODE:
Steve Wise42e31752007-03-06 14:43:56 -06001967 start_ep_timer(ep);
Steve Wiseb038ced2007-02-12 16:16:18 -08001968 ep->com.state = CLOSING;
1969 close = 1;
1970 break;
1971 case CLOSING:
Steve Wiseb038ced2007-02-12 16:16:18 -08001972 ep->com.state = MORIBUND;
1973 close = 1;
1974 break;
1975 case MORIBUND:
1976 break;
1977 default:
1978 BUG();
1979 break;
1980 }
1981out:
1982 spin_unlock_irqrestore(&ep->com.lock, flags);
1983 if (close) {
1984 if (abrupt)
1985 ret = send_abort(ep, NULL, gfp);
1986 else
1987 ret = send_halfclose(ep, gfp);
1988 }
1989 return ret;
1990}
1991
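/*
 * iwch_ep_redirect() - netevent callback: the route for an endpoint
 * has been redirected to a new destination and L2T entry.
 *
 * Returns 1 if the endpoint was updated, 0 if @old is not our dst.
 */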
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue so they run in a safe
 * (process) context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

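/*
 * set_tcb_rpl() - completion handler for SET_TCB_FIELD writes.
 *
 * Handled inline in the CPL receive path rather than on the work
 * queue; there is nothing to do except log an unexpected status.
 */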
static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}

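/*
 * iwch_cm_init() - CM module initialization: create the work queue and
 * register the CPL message handlers.
 */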
int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	/*
	 * All upcalls from the T3 Core go to sched() to
	 * schedule the processing on a work queue.
	 */
	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
	t3c_handlers[CPL_RX_DATA] = sched;
	t3c_handlers[CPL_TX_DMA_ACK] = sched;
	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
	t3c_handlers[CPL_ABORT_RPL] = sched;
	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
	t3c_handlers[CPL_PEER_CLOSE] = sched;
	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
	return 0;
}

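/*
 * iwch_cm_term() - CM module teardown: drain any queued CM work, then
 * destroy the work queue.
 */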
void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}