/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

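/*
 * Debug names for the connection states, indexed by enum iwch_ep_state;
 * state_set() and __free_ep() use them for PDBG tracing, so keep this
 * table in sync with the enum.
 */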
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int ep_timeout_secs = 10;
module_param(ep_timeout_secs, int, 0444);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=10)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0444);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
			  "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0444);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0444);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0444);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0444);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0444);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0444);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);

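/*
 * The endpoint timer holds a reference on the ep: start_ep_timer() takes
 * one via get_ep() (unless it is only restarting a pending timer), and
 * stop_ep_timer() or the ep_timeout() handler drops it again.
 */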
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	tdev->send(tdev, skb);
	return;
}

int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

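/*
 * Effective MSS: the MTU table entry selected by the peer's TCP options,
 * less 40 bytes for the IP and TCP headers, less 12 more if timestamps
 * are in use, clamped to a floor of 128 bytes.
 */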
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static inline void __state_set(struct iwch_ep_common *epc,
			       enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kmalloc(size, gfp);
	if (epc) {
		memset(epc, 0, size);
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep_common *epc;
	epc = container_of(kref, struct iwch_ep_common, kref);
	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
	kfree(epc);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, ep->hwtid, NULL);
	put_ep(&ep->com);
}

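/*
 * CPL messages are not handled in the interrupt path: sched() stashes the
 * endpoint pointer and the t3cdev in skb->cb and queues the skb on rxq;
 * this work function dequeues each skb and dispatches it to the handler
 * registered in work_handlers[] for its CPL opcode.
 */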
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb) {
		BUG_ON(skb_cloned(skb));
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

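/*
 * Walk the adapter's MTU table and return the index of the largest entry
 * that does not exceed the path MTU.
 */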
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

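/*
 * Build and send the CPL_ACT_OPEN_REQ that starts the active TCP open:
 * the window scale is derived from rcv_win, the MSS index from the path
 * MTU, and the request goes out through the endpoint's L2T entry with an
 * ARP failure handler attached.
 */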
static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

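/*
 * Stream-mode MPA exchange: each MPA message (key, flags, revision,
 * private data) is sent as TCP payload fronted by a TX_DATA_WR work
 * request.  The skb holding the message is referenced until the hardware
 * acks the transmit; tx_ack() drops that reference.
 */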
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb->end) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb->h.raw = skb->data;
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb->h.raw = skb->data;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb->h.raw = skb->data;
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_MORE | F_TX_IMM_ACK | F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}

static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD)
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	ep->com.tdev->send(ep->com.tdev, skb);
	return credits;
}

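/*
 * MPA messages can arrive split across several RX_DATA segments, so the
 * reply is accumulated in ep->mpa_pkt until the full header and private
 * data are present before it is validated and acted upon.
 */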
static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (!err)
		goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	memcpy(&(ep->mpa_pkt[ep->mpa_pkt_len]), skb->data, skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	enum iwch_qp_attr_mask mask;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);

	if (credits == 0)
		return CPL_RET_BUF_DONE;
	BUG_ON(credits != 1);
	BUG_ON(ep->mpa_skb == NULL);
	kfree_skb(ep->mpa_skb);
	ep->mpa_skb = NULL;
	dst_confirm(ep->dst);
	if (state_read(&ep->com) == MPA_REP_SENT) {
		struct iwch_qp_attributes attrs;

		/* bind QP to EP and move to RTS */
		attrs.mpa_attr = ep->mpa_attr;
		attrs.max_ird = ep->ird;
		attrs.max_ord = ep->ord;
		attrs.llp_stream_handle = ep;
		attrs.next_state = IWCH_QP_STATE_RTS;

		/* bind QP and TID with INIT_WR */
		mask = IWCH_QP_ATTR_NEXT_STATE |
		       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
		       IWCH_QP_ATTR_MPA_ATTR |
		       IWCH_QP_ATTR_MAX_IRD |
		       IWCH_QP_ATTR_MAX_ORD;

		ep->com.rpl_err = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, mask, &attrs, 1);

		if (!ep->com.rpl_err) {
			state_set(&ep->com, FPDU_MODE);
			established_upcall(ep);
		}

		ep->com.rpl_done = 1;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
	}
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	close_complete_upcall(ep);
	state_set(&ep->com, DEAD);
	release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}

static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}

static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type == T3B)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		tdev->send(tdev, skb);
	}
}

static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __FUNCTION__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR
		       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
		       __FUNCTION__,
		       req->dst_mac[0],
		       req->dst_mac[1],
		       req->dst_mac[2],
		       req->dst_mac[3],
		       req->dst_mac[4],
		       req->dst_mac[5]);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __FUNCTION__);
		goto reject;
	}
	dst = &rt->u.dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __FUNCTION__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __FUNCTION__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->snd_seq = ntohl(req->snd_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}

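/*
 * Orderly shutdown: the first half-close moves the ep to CLOSING, the
 * second (CLOSE_CON_RPL, or this PEER_CLOSE arriving in CLOSING) to
 * MORIBUND, and the final transition to DEAD releases the endpoint's
 * resources.
 */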
static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		__state_set(&ep->com, CLOSING);
		get_ep(&ep->com);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		start_ep_timer(ep);
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static inline int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int state;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	state = state_read(&ep->com);
	PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
	switch (state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		break;
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		get_ep(&ep->com);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		/* fall through */
	case FPDU_MODE:
	case CLOSING:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __FUNCTION__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __FUNCTION__);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
		put_ep(&ep->com);
		return CPL_RET_BUF_DONE;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	ep->com.tdev->send(ep->com.tdev, rpl_skb);
	if (state != ABORTING) {
		state_set(&ep->com, DEAD);
		release_ep_resources(ep);
	}
	return CPL_RET_BUF_DONE;
}

static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		start_ep_timer(ep);
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case DEAD:
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumer consumption.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
	memcpy(ep->com.qp->attr.terminate_buffer, skb->data, skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}

static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __FUNCTION__, ep->hwtid);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}

static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		break;
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		break;
	default:
		BUG();
	}
	__state_set(&ep->com, CLOSING);
	spin_unlock_irqrestore(&ep->com.lock, flags);
	abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}

int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	state_set(&ep->com, CLOSING);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = send_halfclose(ep, GFP_KERNEL);
	}
	return 0;
}

int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		return -EINVAL;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);
	get_ep(&ep->com);
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
		put_ep(&ep->com);
		return err;
	}

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);

	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
	} else {
		state_set(&ep->com, FPDU_MODE);
		established_upcall(ep);
	}
	put_ep(&ep->com);
	return err;
}

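/*
 * Active connection setup: allocate an endpoint and an active TID, find
 * a route and an L2T entry for the destination, then issue the
 * CPL_ACT_OPEN_REQ.  Failures unwind through the fail labels in reverse
 * order of acquisition.
 */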
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	put_ep(&ep->com);
out:
	return err;
}

int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	put_ep(&ep->com);
fail1:
out:
	return err;
}

int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}

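/*
 * Begin closing a connection: with abrupt set, the ep moves to ABORTING
 * and an abort (RST) is sent; otherwise it advances through the graceful
 * CLOSING/MORIBUND path with a half-close.  The gfp argument is passed
 * through to the skb allocation.
 */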
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep,
	     states[ep->com.state], abrupt);

	if (ep->com.state == DEAD) {
		PDBG("%s already dead ep %p\n", __FUNCTION__, ep);
		goto out;
	}

	if (abrupt) {
		if (ep->com.state != ABORTING) {
			ep->com.state = ABORTING;
			close = 1;
		}
		goto out;
	}

	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		ep->com.state = CLOSING;
		close = 1;
		break;
	case CLOSING:
		start_ep_timer(ep);
		ep->com.state = MORIBUND;
		close = 1;
		break;
	case MORIBUND:
		break;
	default:
		BUG();
		break;
	}
out:
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
	}
	return ret;
}

int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	/*
	 * All upcalls from the T3 Core go to sched() to
	 * schedule the processing on a work queue.
	 */
	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
	t3c_handlers[CPL_RX_DATA] = sched;
	t3c_handlers[CPL_TX_DMA_ACK] = sched;
	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
	t3c_handlers[CPL_ABORT_RPL] = sched;
	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
	t3c_handlers[CPL_PEER_CLOSE] = sched;
	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
	return 0;
}

void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}