1/*******************************************************************************
2*
3* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
4*
5* This software is available to you under a choice of one of two
6* licenses. You may choose to be licensed under the terms of the GNU
7* General Public License (GPL) Version 2, available from the file
8* COPYING in the main directory of this source tree, or the
9* OpenFabrics.org BSD license below:
10*
11* Redistribution and use in source and binary forms, with or
12* without modification, are permitted provided that the following
13* conditions are met:
14*
15* - Redistributions of source code must retain the above
16* copyright notice, this list of conditions and the following
17* disclaimer.
18*
19* - Redistributions in binary form must reproduce the above
20* copyright notice, this list of conditions and the following
21* disclaimer in the documentation and/or other materials
22* provided with the distribution.
23*
24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31* SOFTWARE.
32*
33*******************************************************************************/
34
35#include <linux/atomic.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/init.h>
39#include <linux/if_arp.h>
40#include <linux/if_vlan.h>
41#include <linux/notifier.h>
42#include <linux/net.h>
43#include <linux/types.h>
44#include <linux/timer.h>
45#include <linux/time.h>
46#include <linux/delay.h>
47#include <linux/etherdevice.h>
48#include <linux/netdevice.h>
49#include <linux/random.h>
50#include <linux/list.h>
51#include <linux/threads.h>
52#include <linux/highmem.h>
53#include <net/arp.h>
54#include <net/ndisc.h>
55#include <net/neighbour.h>
56#include <net/route.h>
57#include <net/addrconf.h>
58#include <net/ip6_route.h>
59#include <net/ip_fib.h>
60#include <net/tcp.h>
61#include <asm/checksum.h>
62
63#include "i40iw.h"
64
65static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);
66static void i40iw_cm_post_event(struct i40iw_cm_event *event);
67static void i40iw_disconnect_worker(struct work_struct *work);
68
69/**
70 * i40iw_free_sqbuf - put back puda buffer if refcount = 0
71 * @dev: FPK device
72 * @buf: puda buffer to free
73 */
74void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp)
75{
76 struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
77 struct i40iw_puda_rsrc *ilq = dev->ilq;
78
79 if (!atomic_dec_return(&buf->refcount))
80 i40iw_puda_ret_bufpool(ilq, buf);
81}
82
83/**
84 * i40iw_derive_hw_ird_setting - Calculate IRD
85 *
86 * @cm_ird: IRD of connection's node
87 *
88 * The ird from the connection is rounded to a supported HW
89 * setting (2,8,32,64) and then encoded for ird_size field of
90 * qp_ctx
91 */
92static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
93{
94 u8 encoded_ird_size;
95 u8 pof2_cm_ird = 1;
96
97 /* round-off to next powerof2 */
98 while (pof2_cm_ird < cm_ird)
99 pof2_cm_ird *= 2;
100
101 /* ird_size field is encoded in qp_ctx */
102 switch (pof2_cm_ird) {
103 case I40IW_HW_IRD_SETTING_64:
104 encoded_ird_size = 3;
105 break;
106 case I40IW_HW_IRD_SETTING_32:
107 case I40IW_HW_IRD_SETTING_16:
108 encoded_ird_size = 2;
109 break;
110 case I40IW_HW_IRD_SETTING_8:
111 case I40IW_HW_IRD_SETTING_4:
112 encoded_ird_size = 1;
113 break;
114 case I40IW_HW_IRD_SETTING_2:
115 default:
116 encoded_ird_size = 0;
117 break;
118 }
119 return encoded_ird_size;
120}
121
122/**
123 * i40iw_record_ird_ord - Record IRD/ORD passed in
124 * @cm_node: connection's node
125 * @conn_ird: connection IRD
126 * @conn_ord: connection ORD
127 */
128static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
129{
130 if (conn_ird > I40IW_MAX_IRD_SIZE)
131 conn_ird = I40IW_MAX_IRD_SIZE;
132
133 if (conn_ord > I40IW_MAX_ORD_SIZE)
134 conn_ord = I40IW_MAX_ORD_SIZE;
135
136 cm_node->ird_size = conn_ird;
137 cm_node->ord_size = conn_ord;
138}
139
140/**
141 * i40iw_copy_ip_ntohl - change network to host ip
142 * @dst: host ip
143 * @src: big endian
144 */
145void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)
146{
147 *dst++ = ntohl(*src++);
148 *dst++ = ntohl(*src++);
149 *dst++ = ntohl(*src++);
150 *dst = ntohl(*src);
151}
152
153/**
154 * i40iw_copy_ip_htonl - change host addr to network ip
155 * @dst: network order (big endian) ip
156 * @src: host order ip
157 */
158static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)
159{
160 *dst++ = htonl(*src++);
161 *dst++ = htonl(*src++);
162 *dst++ = htonl(*src++);
163 *dst = htonl(*src);
164}
165
166/**
167 * i40iw_fill_sockaddr4 - get addr info for passive connection
168 * @cm_node: connection's node
169 * @event: upper layer's cm event
170 */
171static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,
172 struct iw_cm_event *event)
173{
174 struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
175 struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
176
177 laddr->sin_family = AF_INET;
178 raddr->sin_family = AF_INET;
179
180 laddr->sin_port = htons(cm_node->loc_port);
181 raddr->sin_port = htons(cm_node->rem_port);
182
183 laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
184 raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
185}
186
187/**
188 * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side
189 * @cm_node: connection's node
190 * @event: upper layer's cm event
191 */
192static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,
193 struct iw_cm_event *event)
194{
195 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
196 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
197
198 laddr6->sin6_family = AF_INET6;
199 raddr6->sin6_family = AF_INET6;
200
201 laddr6->sin6_port = htons(cm_node->loc_port);
202 raddr6->sin6_port = htons(cm_node->rem_port);
203
204 i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
205 cm_node->loc_addr);
206 i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
207 cm_node->rem_addr);
208}
209
210/**
211 * i40iw_get_addr_info
212 * @cm_node: contains ip/tcp info
213 * @cm_info: to get a copy of the cm_node ip/tcp info
214*/
215static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
216 struct i40iw_cm_info *cm_info)
217{
218 cm_info->ipv4 = cm_node->ipv4;
219 cm_info->vlan_id = cm_node->vlan_id;
220 memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
221 memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
222 cm_info->loc_port = cm_node->loc_port;
223 cm_info->rem_port = cm_node->rem_port;
224}
225
226/**
227 * i40iw_get_cmevent_info - for cm event upcall
228 * @cm_node: connection's node
229 * @cm_id: upper layers cm struct for the event
230 * @event: upper layer's cm event
231 */
232static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
233 struct iw_cm_id *cm_id,
234 struct iw_cm_event *event)
235{
236 memcpy(&event->local_addr, &cm_id->m_local_addr,
237 sizeof(event->local_addr));
238 memcpy(&event->remote_addr, &cm_id->m_remote_addr,
239 sizeof(event->remote_addr));
240 if (cm_node) {
241 event->private_data = (void *)cm_node->pdata_buf;
242 event->private_data_len = (u8)cm_node->pdata.size;
243 event->ird = cm_node->ird_size;
244 event->ord = cm_node->ord_size;
245 }
246}
247
248/**
249 * i40iw_send_cm_event - upcall cm's event handler
250 * @cm_node: connection's node
251 * @cm_id: upper layer's cm info struct
252 * @type: Event type to indicate
253 * @status: status for the event type
254 */
255static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
256 struct iw_cm_id *cm_id,
257 enum iw_cm_event_type type,
258 int status)
259{
260 struct iw_cm_event event;
261
262 memset(&event, 0, sizeof(event));
263 event.event = type;
264 event.status = status;
265 switch (type) {
266 case IW_CM_EVENT_CONNECT_REQUEST:
267 if (cm_node->ipv4)
268 i40iw_fill_sockaddr4(cm_node, &event);
269 else
270 i40iw_fill_sockaddr6(cm_node, &event);
271 event.provider_data = (void *)cm_node;
272 event.private_data = (void *)cm_node->pdata_buf;
273 event.private_data_len = (u8)cm_node->pdata.size;
274 break;
275 case IW_CM_EVENT_CONNECT_REPLY:
276 i40iw_get_cmevent_info(cm_node, cm_id, &event);
277 break;
278 case IW_CM_EVENT_ESTABLISHED:
279 event.ird = cm_node->ird_size;
280 event.ord = cm_node->ord_size;
281 break;
282 case IW_CM_EVENT_DISCONNECT:
283 break;
284 case IW_CM_EVENT_CLOSE:
285 break;
286 default:
287 i40iw_pr_err("event type received type = %d\n", type);
288 return -1;
289 }
290 return cm_id->event_handler(cm_id, &event);
291}
292
293/**
294 * i40iw_create_event - create cm event
295 * @cm_node: connection's node
296 * @type: Event type to generate
297 */
298static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
299 enum i40iw_cm_event_type type)
300{
301 struct i40iw_cm_event *event;
302
303 if (!cm_node->cm_id)
304 return NULL;
305
306 event = kzalloc(sizeof(*event), GFP_ATOMIC);
307
308 if (!event)
309 return NULL;
310
311 event->type = type;
312 event->cm_node = cm_node;
313 memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
314 memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
315 event->cm_info.rem_port = cm_node->rem_port;
316 event->cm_info.loc_port = cm_node->loc_port;
317 event->cm_info.cm_id = cm_node->cm_id;
318
319 i40iw_debug(cm_node->dev,
320 I40IW_DEBUG_CM,
321 "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
322 cm_node,
323 event,
324 type,
325 event->cm_info.loc_addr,
326 event->cm_info.rem_addr);
327
328 i40iw_cm_post_event(event);
329 return event;
330}
331
332/**
333 * i40iw_free_retrans_entry - free send entry
334 * @cm_node: connection's node
335 */
336static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
337{
338 struct i40iw_sc_dev *dev = cm_node->dev;
339 struct i40iw_timer_entry *send_entry;
340
341 send_entry = cm_node->send_entry;
342 if (send_entry) {
343 cm_node->send_entry = NULL;
344 i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf);
345 kfree(send_entry);
346 atomic_dec(&cm_node->ref_count);
347 }
348}
349
350/**
351 * i40iw_cleanup_retrans_entry - free send entry with lock
352 * @cm_node: connection's node
353 */
354static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
355{
356 unsigned long flags;
357
358 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
359 i40iw_free_retrans_entry(cm_node);
360 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
361}
362
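/**
 * is_remote_ne020_or_chelsio - check the remote MAC OUI
 * @cm_node: connection's node
 *
 * Returns true when the remote MAC starts with 00:12:55 or 00:07:43
 * (NE020 or Chelsio peers); such peers are not sent the MPA zero pad.
 */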
363static bool is_remote_ne020_or_chelsio(struct i40iw_cm_node *cm_node)
364{
365 if ((cm_node->rem_mac[0] == 0x0) &&
366 (((cm_node->rem_mac[1] == 0x12) && (cm_node->rem_mac[2] == 0x55)) ||
367 ((cm_node->rem_mac[1] == 0x07 && (cm_node->rem_mac[2] == 0x43)))))
368 return true;
369 return false;
370}
371
372/**
373 * i40iw_form_cm_frame - get a free packet and build frame
374 * @cm_node: connection's node info to use in frame
375 * @options: pointer to options info
376 * @hdr: pointer mpa header
377 * @pdata: pointer to private data
378 * @flags: indicates FIN or ACK
379 */
380static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
381 struct i40iw_kmem_info *options,
382 struct i40iw_kmem_info *hdr,
383 struct i40iw_kmem_info *pdata,
384 u8 flags)
385{
386 struct i40iw_puda_buf *sqbuf;
387 struct i40iw_sc_dev *dev = cm_node->dev;
388 u8 *buf;
389
390 struct tcphdr *tcph;
391 struct iphdr *iph;
392 struct ipv6hdr *ip6h;
393 struct ethhdr *ethh;
394 u16 packetsize;
395 u16 eth_hlen = ETH_HLEN;
396 u32 opts_len = 0;
397 u32 pd_len = 0;
398 u32 hdr_len = 0;
399
400 sqbuf = i40iw_puda_get_bufpool(dev->ilq);
401 if (!sqbuf)
402 return NULL;
403 buf = sqbuf->mem.va;
404
405 if (options)
406 opts_len = (u32)options->size;
407
408 if (hdr)
409 hdr_len = hdr->size;
410
411 if (pdata) {
412 pd_len = pdata->size;
413 if (!is_remote_ne020_or_chelsio(cm_node))
414 pd_len += MPA_ZERO_PAD_LEN;
415 }
416
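 /* a valid vlan_id means a tagged frame: add 4 bytes for the 802.1Q header */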
417 if (cm_node->vlan_id < VLAN_TAG_PRESENT)
418 eth_hlen += 4;
419
420 if (cm_node->ipv4)
421 packetsize = sizeof(*iph) + sizeof(*tcph);
422 else
423 packetsize = sizeof(*ip6h) + sizeof(*tcph);
424 packetsize += opts_len + hdr_len + pd_len;
425
426 memset(buf, 0x00, eth_hlen + packetsize);
427
428 sqbuf->totallen = packetsize + eth_hlen;
429 sqbuf->maclen = eth_hlen;
430 sqbuf->tcphlen = sizeof(*tcph) + opts_len;
431 sqbuf->scratch = (void *)cm_node;
432
433 ethh = (struct ethhdr *)buf;
434 buf += eth_hlen;
435
436 if (cm_node->ipv4) {
437 sqbuf->ipv4 = true;
438
439 iph = (struct iphdr *)buf;
440 buf += sizeof(*iph);
441 tcph = (struct tcphdr *)buf;
442 buf += sizeof(*tcph);
443
444 ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
445 ether_addr_copy(ethh->h_source, cm_node->loc_mac);
446 if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
447 ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
448 ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
449
450 ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
451 } else {
452 ethh->h_proto = htons(ETH_P_IP);
453 }
454
455 iph->version = IPVERSION;
456 iph->ihl = 5; /* 5 * 4-byte words, IP header len */
457 iph->tos = 0;
458 iph->tot_len = htons(packetsize);
459 iph->id = htons(++cm_node->tcp_cntxt.loc_id);
460
461 iph->frag_off = htons(0x4000);
462 iph->ttl = 0x40;
463 iph->protocol = IPPROTO_TCP;
464 iph->saddr = htonl(cm_node->loc_addr[0]);
465 iph->daddr = htonl(cm_node->rem_addr[0]);
466 } else {
467 sqbuf->ipv4 = false;
468 ip6h = (struct ipv6hdr *)buf;
469 buf += sizeof(*ip6h);
470 tcph = (struct tcphdr *)buf;
471 buf += sizeof(*tcph);
472
473 ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
474 ether_addr_copy(ethh->h_source, cm_node->loc_mac);
475 if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
476 ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
477 ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
478 ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
479 } else {
480 ethh->h_proto = htons(ETH_P_IPV6);
481 }
482 ip6h->version = 6;
483 ip6h->flow_lbl[0] = 0;
484 ip6h->flow_lbl[1] = 0;
485 ip6h->flow_lbl[2] = 0;
486 ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
487 ip6h->nexthdr = 6;
488 ip6h->hop_limit = 128;
489 i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
490 cm_node->loc_addr);
491 i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
492 cm_node->rem_addr);
493 }
494
495 tcph->source = htons(cm_node->loc_port);
496 tcph->dest = htons(cm_node->rem_port);
497
498 tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
499
500 if (flags & SET_ACK) {
501 cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
502 tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
503 tcph->ack = 1;
504 } else {
505 tcph->ack_seq = 0;
506 }
507
508 if (flags & SET_SYN) {
509 cm_node->tcp_cntxt.loc_seq_num++;
510 tcph->syn = 1;
511 } else {
512 cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
513 }
514
515 if (flags & SET_FIN) {
516 cm_node->tcp_cntxt.loc_seq_num++;
517 tcph->fin = 1;
518 }
519
520 if (flags & SET_RST)
521 tcph->rst = 1;
522
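 /* data offset in 32-bit words, rounded up to cover any TCP options */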
523 tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
524 sqbuf->tcphlen = tcph->doff << 2;
525 tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
526 tcph->urg_ptr = 0;
527
528 if (opts_len) {
529 memcpy(buf, options->addr, opts_len);
530 buf += opts_len;
531 }
532
533 if (hdr_len) {
534 memcpy(buf, hdr->addr, hdr_len);
535 buf += hdr_len;
536 }
537
538 if (pd_len)
539 memcpy(buf, pdata->addr, pd_len);
540
541 atomic_set(&sqbuf->refcount, 1);
542
543 return sqbuf;
544}
545
546/**
547 * i40iw_send_reset - Send RST packet
548 * @cm_node: connection's node
549 */
550static int i40iw_send_reset(struct i40iw_cm_node *cm_node)
551{
552 struct i40iw_puda_buf *sqbuf;
553 int flags = SET_RST | SET_ACK;
554
555 sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);
556 if (!sqbuf) {
557 i40iw_pr_err("no sqbuf\n");
558 return -1;
559 }
560
561 return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);
562}
563
564/**
565 * i40iw_active_open_err - send event for active side cm error
566 * @cm_node: connection's node
567 * @reset: Flag to send reset or not
568 */
569static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
570{
571 i40iw_cleanup_retrans_entry(cm_node);
572 cm_node->cm_core->stats_connect_errs++;
573 if (reset) {
574 i40iw_debug(cm_node->dev,
575 I40IW_DEBUG_CM,
576 "%s cm_node=%p state=%d\n",
577 __func__,
578 cm_node,
579 cm_node->state);
580 atomic_inc(&cm_node->ref_count);
581 i40iw_send_reset(cm_node);
582 }
583
584 cm_node->state = I40IW_CM_STATE_CLOSED;
585 i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
586}
587
588/**
589 * i40iw_passive_open_err - handle passive side cm error
590 * @cm_node: connection's node
591 * @reset: send reset or just free cm_node
592 */
593static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
594{
595 i40iw_cleanup_retrans_entry(cm_node);
596 cm_node->cm_core->stats_passive_errs++;
597 cm_node->state = I40IW_CM_STATE_CLOSED;
598 i40iw_debug(cm_node->dev,
599 I40IW_DEBUG_CM,
600 "%s cm_node=%p state =%d\n",
601 __func__,
602 cm_node,
603 cm_node->state);
604 if (reset)
605 i40iw_send_reset(cm_node);
606 else
607 i40iw_rem_ref_cm_node(cm_node);
608}
609
610/**
611 * i40iw_event_connect_error - to create connect error event
612 * @event: cm information for connect event
613 */
614static void i40iw_event_connect_error(struct i40iw_cm_event *event)
615{
616 struct i40iw_qp *iwqp;
617 struct iw_cm_id *cm_id;
618
619 cm_id = event->cm_node->cm_id;
620 if (!cm_id)
621 return;
622
623 iwqp = cm_id->provider_data;
624
625 if (!iwqp || !iwqp->iwdev)
626 return;
627
628 iwqp->cm_id = NULL;
629 cm_id->provider_data = NULL;
630 i40iw_send_cm_event(event->cm_node, cm_id,
631 IW_CM_EVENT_CONNECT_REPLY,
632 -ECONNRESET);
633 cm_id->rem_ref(cm_id);
634 i40iw_rem_ref_cm_node(event->cm_node);
635}
636
637/**
638 * i40iw_process_options
639 * @cm_node: connection's node
640 * @optionsloc: point to start of options
641 * @optionsize: size of all options
642 * @syn_packet: flag if syn packet
643 */
644static int i40iw_process_options(struct i40iw_cm_node *cm_node,
645 u8 *optionsloc,
646 u32 optionsize,
647 u32 syn_packet)
648{
649 u32 tmp;
650 u32 offset = 0;
651 union all_known_options *all_options;
652 char got_mss_option = 0;
653
654 while (offset < optionsize) {
655 all_options = (union all_known_options *)(optionsloc + offset);
656 switch (all_options->as_base.optionnum) {
657 case OPTION_NUMBER_END:
658 offset = optionsize;
659 break;
660 case OPTION_NUMBER_NONE:
661 offset += 1;
662 continue;
663 case OPTION_NUMBER_MSS:
664 i40iw_debug(cm_node->dev,
665 I40IW_DEBUG_CM,
666 "%s: MSS Length: %d Offset: %d Size: %d\n",
667 __func__,
668 all_options->as_mss.length,
669 offset,
670 optionsize);
671 got_mss_option = 1;
672 if (all_options->as_mss.length != 4)
673 return -1;
674 tmp = ntohs(all_options->as_mss.mss);
675 if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
676 cm_node->tcp_cntxt.mss = tmp;
677 break;
678 case OPTION_NUMBER_WINDOW_SCALE:
679 cm_node->tcp_cntxt.snd_wscale =
680 all_options->as_windowscale.shiftcount;
681 break;
682 default:
683 i40iw_debug(cm_node->dev,
684 I40IW_DEBUG_CM,
685 "TCP Option not understood: %x\n",
686 all_options->as_base.optionnum);
687 break;
688 }
689 offset += all_options->as_base.length;
690 }
691 if (!got_mss_option && syn_packet)
692 cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;
693 return 0;
694}
695
696/**
697 * i40iw_handle_tcp_options - process TCP options and update the send window
698 * @cm_node: connection's node
699 * @tcph: pointer tcp header
700 * @optionsize: size of options rcvd
701 * @passive: active or passive flag
702 */
703static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
704 struct tcphdr *tcph,
705 int optionsize,
706 int passive)
707{
708 u8 *optionsloc = (u8 *)&tcph[1];
709
710 if (optionsize) {
711 if (i40iw_process_options(cm_node,
712 optionsloc,
713 optionsize,
714 (u32)tcph->syn)) {
715 i40iw_debug(cm_node->dev,
716 I40IW_DEBUG_CM,
717 "%s: Node %p, Sending RESET\n",
718 __func__,
719 cm_node);
720 if (passive)
721 i40iw_passive_open_err(cm_node, true);
722 else
723 i40iw_active_open_err(cm_node, true);
724 return -1;
725 }
726 }
727
728 cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
729 cm_node->tcp_cntxt.snd_wscale;
730
731 if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
732 cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
733 return 0;
734}
735
736/**
737 * i40iw_build_mpa_v1 - build a MPA V1 frame
738 * @cm_node: connection's node
739 * @mpa_key: to do read0 or write0
740 */
741static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
742 void *start_addr,
743 u8 mpa_key)
744{
745 struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;
746
747 switch (mpa_key) {
748 case MPA_KEY_REQUEST:
749 memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
750 break;
751 case MPA_KEY_REPLY:
752 memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
753 break;
754 default:
755 break;
756 }
757 mpa_frame->flags = IETF_MPA_FLAGS_CRC;
758 mpa_frame->rev = cm_node->mpa_frame_rev;
759 mpa_frame->priv_data_len = htons(cm_node->pdata.size);
760}
761
762/**
763 * i40iw_build_mpa_v2 - build a MPA V2 frame
764 * @cm_node: connection's node
765 * @start_addr: buffer start address
766 * @mpa_key: to do read0 or write0
767 */
768static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
769 void *start_addr,
770 u8 mpa_key)
771{
772 struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
773 struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
774 u16 ctrl_ird, ctrl_ord;
775
776 /* initialize the upper 5 bytes of the frame */
777 i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
778 mpa_frame->flags |= IETF_MPA_V2_FLAG;
779 mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
780
781 /* initialize RTR msg */
782 if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
783 ctrl_ird = IETF_NO_IRD_ORD;
784 ctrl_ord = IETF_NO_IRD_ORD;
785 } else {
786 ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
787 IETF_NO_IRD_ORD : cm_node->ird_size;
788 ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
789 IETF_NO_IRD_ORD : cm_node->ord_size;
790 }
791
792 ctrl_ird |= IETF_PEER_TO_PEER;
793 ctrl_ird |= IETF_FLPDU_ZERO_LEN;
794
795 switch (mpa_key) {
796 case MPA_KEY_REQUEST:
797 ctrl_ord |= IETF_RDMA0_WRITE;
798 ctrl_ord |= IETF_RDMA0_READ;
799 break;
800 case MPA_KEY_REPLY:
801 switch (cm_node->send_rdma0_op) {
802 case SEND_RDMA_WRITE_ZERO:
803 ctrl_ord |= IETF_RDMA0_WRITE;
804 break;
805 case SEND_RDMA_READ_ZERO:
806 ctrl_ord |= IETF_RDMA0_READ;
807 break;
808 }
809 break;
810 default:
811 break;
812 }
813 rtr_msg->ctrl_ird = htons(ctrl_ird);
814 rtr_msg->ctrl_ord = htons(ctrl_ord);
815}
816
817/**
818 * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
819 * @cm_node: connection's node
820 * @mpa: mpa data buffer
821 * @mpa_key: to do read0 or write0
822 */
823static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,
824 struct i40iw_kmem_info *mpa,
825 u8 mpa_key)
826{
827 int hdr_len = 0;
828
829 switch (cm_node->mpa_frame_rev) {
830 case IETF_MPA_V1:
831 hdr_len = sizeof(struct ietf_mpa_v1);
832 i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);
833 break;
834 case IETF_MPA_V2:
835 hdr_len = sizeof(struct ietf_mpa_v2);
836 i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);
837 break;
838 default:
839 break;
840 }
841
842 return hdr_len;
843}
844
845/**
846 * i40iw_send_mpa_request - active node send mpa request to passive node
847 * @cm_node: connection's node
848 */
849static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
850{
851 struct i40iw_puda_buf *sqbuf;
852
853 if (!cm_node) {
854 i40iw_pr_err("cm_node == NULL\n");
855 return -1;
856 }
857
858 cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
859 cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
860 &cm_node->mpa_hdr,
861 MPA_KEY_REQUEST);
862 if (!cm_node->mpa_hdr.size) {
863 i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
864 return -1;
865 }
866
867 sqbuf = i40iw_form_cm_frame(cm_node,
868 NULL,
869 &cm_node->mpa_hdr,
870 &cm_node->pdata,
871 SET_ACK);
872 if (!sqbuf) {
873 i40iw_pr_err("sq_buf == NULL\n");
874 return -1;
875 }
876 return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
877}
878
879/**
880 * i40iw_send_mpa_reject - send an mpa reject frame
881 * @cm_node: connection's node
882 * @pdata: reject data for connection
883 * @plen: length of reject data
884 */
885static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
886 const void *pdata,
887 u8 plen)
888{
889 struct i40iw_puda_buf *sqbuf;
890 struct i40iw_kmem_info priv_info;
891
892 cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
893 cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
894 &cm_node->mpa_hdr,
895 MPA_KEY_REPLY);
896
897 cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
898 priv_info.addr = (void *)pdata;
899 priv_info.size = plen;
900
901 sqbuf = i40iw_form_cm_frame(cm_node,
902 NULL,
903 &cm_node->mpa_hdr,
904 &priv_info,
905 SET_ACK | SET_FIN);
906 if (!sqbuf) {
907 i40iw_pr_err("no sqbuf\n");
908 return -ENOMEM;
909 }
910 cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
911 return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
912}
913
914/**
915 * i40iw_parse_mpa - process an IETF MPA frame
916 * @cm_node: connection's node
917 * @buffer: Data pointer
918 * @type: to return accept or reject
919 * @len: Len of mpa buffer
920 */
921static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)
922{
923 struct ietf_mpa_v1 *mpa_frame;
924 struct ietf_mpa_v2 *mpa_v2_frame;
925 struct ietf_rtr_msg *rtr_msg;
926 int mpa_hdr_len;
927 int priv_data_len;
928
929 *type = I40IW_MPA_REQUEST_ACCEPT;
930
931 if (len < sizeof(struct ietf_mpa_v1)) {
932 i40iw_pr_err("ietf buffer small (%x)\n", len);
933 return -1;
934 }
935
936 mpa_frame = (struct ietf_mpa_v1 *)buffer;
937 mpa_hdr_len = sizeof(struct ietf_mpa_v1);
938 priv_data_len = ntohs(mpa_frame->priv_data_len);
939
940 if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
941 i40iw_pr_err("large pri_data %d\n", priv_data_len);
942 return -1;
943 }
944 if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
945 i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev);
946 return -1;
947 }
948 if (mpa_frame->rev > cm_node->mpa_frame_rev) {
949 i40iw_pr_err("rev %d\n", mpa_frame->rev);
950 return -1;
951 }
952 cm_node->mpa_frame_rev = mpa_frame->rev;
953
954 if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
955 if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
956 i40iw_pr_err("Unexpected MPA Key received\n");
957 return -1;
958 }
959 } else {
960 if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
961 i40iw_pr_err("Unexpected MPA Key received\n");
962 return -1;
963 }
964 }
965
966 if (priv_data_len + mpa_hdr_len > len) {
967 i40iw_pr_err("ietf buffer len(%x + %x != %x)\n",
968 priv_data_len, mpa_hdr_len, len);
969 return -1;
970 }
971 if (len > MAX_CM_BUFFER) {
972 i40iw_pr_err("ietf buffer large len = %d\n", len);
973 return -1;
974 }
975
976 switch (mpa_frame->rev) {
977 case IETF_MPA_V2:{
978 u16 ird_size;
979 u16 ord_size;
980 u16 ctrl_ord;
981 u16 ctrl_ird;
982
983 mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
984 mpa_hdr_len += IETF_RTR_MSG_SIZE;
985 rtr_msg = &mpa_v2_frame->rtr_msg;
986
987 /* parse rtr message */
988 ctrl_ord = ntohs(rtr_msg->ctrl_ord);
989 ctrl_ird = ntohs(rtr_msg->ctrl_ird);
990 ird_size = ctrl_ird & IETF_NO_IRD_ORD;
991 ord_size = ctrl_ord & IETF_NO_IRD_ORD;
992
993 if (!(ctrl_ird & IETF_PEER_TO_PEER))
994 return -1;
995
996 if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
997 cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
998 goto negotiate_done;
999 }
1000
1001 if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
1002 /* responder */
1003 if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
1004 cm_node->ird_size = 1;
1005 if (cm_node->ord_size > ird_size)
1006 cm_node->ord_size = ird_size;
1007 } else {
1008 /* initiator */
1009 if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
1010 return -1;
1011 if (cm_node->ord_size > ird_size)
1012 cm_node->ord_size = ird_size;
1013
1014 if (cm_node->ird_size < ord_size)
1015 /* no resources available */
1016 return -1;
1017 }
1018
1019negotiate_done:
1020 if (ctrl_ord & IETF_RDMA0_READ)
1021 cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
1022 else if (ctrl_ord & IETF_RDMA0_WRITE)
1023 cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
1024 else /* Not supported RDMA0 operation */
1025 return -1;
1026 i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
1027 "MPAV2: Negotiated ORD: %d, IRD: %d\n",
1028 cm_node->ord_size, cm_node->ird_size);
1029 break;
1030 }
1031 break;
1032 case IETF_MPA_V1:
1033 default:
1034 break;
1035 }
1036
1037 memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);
1038 cm_node->pdata.size = priv_data_len;
1039
1040 if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
1041 *type = I40IW_MPA_REQUEST_REJECT;
1042
1043 if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
1044 cm_node->snd_mark_en = true;
1045
1046 return 0;
1047}
1048
1049/**
1050 * i40iw_schedule_cm_timer
1051 * @cm_node: connection's node
1052 * @sqbuf: buffer to send
1053 * @type: if it is send or close
1054 * @send_retrans: if rexmits to be done
1055 * @close_when_complete: is cm_node to be removed
1056 *
1057 * note - cm_node needs to be protected before calling this. Encase in:
1058 * i40iw_rem_ref_cm_node(cm_core, cm_node);
1059 * i40iw_schedule_cm_timer(...)
1060 * atomic_inc(&cm_node->ref_count);
1061 */
1062int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
1063 struct i40iw_puda_buf *sqbuf,
1064 enum i40iw_timer_type type,
1065 int send_retrans,
1066 int close_when_complete)
1067{
1068 struct i40iw_sc_dev *dev = cm_node->dev;
1069 struct i40iw_cm_core *cm_core = cm_node->cm_core;
1070 struct i40iw_timer_entry *new_send;
1071 int ret = 0;
1072 u32 was_timer_set;
1073 unsigned long flags;
1074
1075 new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
1076 if (!new_send) {
1077 i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
1078 return -ENOMEM;
1079 }
1080 new_send->retrycount = I40IW_DEFAULT_RETRYS;
1081 new_send->retranscount = I40IW_DEFAULT_RETRANS;
1082 new_send->sqbuf = sqbuf;
1083 new_send->timetosend = jiffies;
1084 new_send->type = type;
1085 new_send->send_retrans = send_retrans;
1086 new_send->close_when_complete = close_when_complete;
1087
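 /* close timers fire roughly 100ms out and are tracked via close_entry */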
1088 if (type == I40IW_TIMER_TYPE_CLOSE) {
1089 new_send->timetosend += (HZ / 10);
1090 if (cm_node->close_entry) {
1091 kfree(new_send);
1092 i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
1093 i40iw_pr_err("already close entry\n");
1094 return -EINVAL;
1095 }
1096 cm_node->close_entry = new_send;
1097 }
1098
1099 if (type == I40IW_TIMER_TYPE_SEND) {
1100 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1101 cm_node->send_entry = new_send;
1102 atomic_inc(&cm_node->ref_count);
1103 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1104 new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
1105
1106 atomic_inc(&sqbuf->refcount);
1107 i40iw_puda_send_buf(dev->ilq, sqbuf);
1108 if (!send_retrans) {
1109 i40iw_cleanup_retrans_entry(cm_node);
1110 if (close_when_complete)
1111 i40iw_rem_ref_cm_node(cm_node);
1112 return ret;
1113 }
1114 }
1115
1116 spin_lock_irqsave(&cm_core->ht_lock, flags);
1117 was_timer_set = timer_pending(&cm_core->tcp_timer);
1118
1119 if (!was_timer_set) {
1120 cm_core->tcp_timer.expires = new_send->timetosend;
1121 add_timer(&cm_core->tcp_timer);
1122 }
1123 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1124
1125 return ret;
1126}
1127
1128/**
1129 * i40iw_retrans_expired - Could not rexmit the packet
1130 * @cm_node: connection's node
1131 */
1132static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)
1133{
1134 struct iw_cm_id *cm_id = cm_node->cm_id;
1135 enum i40iw_cm_node_state state = cm_node->state;
1136
1137 cm_node->state = I40IW_CM_STATE_CLOSED;
1138 switch (state) {
1139 case I40IW_CM_STATE_SYN_RCVD:
1140 case I40IW_CM_STATE_CLOSING:
1141 i40iw_rem_ref_cm_node(cm_node);
1142 break;
1143 case I40IW_CM_STATE_FIN_WAIT1:
1144 case I40IW_CM_STATE_LAST_ACK:
1145 if (cm_node->cm_id)
1146 cm_id->rem_ref(cm_id);
1147 i40iw_send_reset(cm_node);
1148 break;
1149 default:
1150 atomic_inc(&cm_node->ref_count);
1151 i40iw_send_reset(cm_node);
1152 i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
1153 break;
1154 }
1155}
1156
1157/**
1158 * i40iw_handle_close_entry - for handling retry/timeouts
1159 * @cm_node: connection's node
1160 * @rem_node: flag for remove cm_node
1161 */
1162static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
1163{
1164 struct i40iw_timer_entry *close_entry = cm_node->close_entry;
1165 struct iw_cm_id *cm_id = cm_node->cm_id;
1166 struct i40iw_qp *iwqp;
1167 unsigned long flags;
1168
1169 if (!close_entry)
1170 return;
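 /* close_entry->sqbuf holds the associated QP (if any) rather than a send buffer */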
1171 iwqp = (struct i40iw_qp *)close_entry->sqbuf;
1172 if (iwqp) {
1173 spin_lock_irqsave(&iwqp->lock, flags);
1174 if (iwqp->cm_id) {
1175 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
1176 iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
1177 iwqp->last_aeq = I40IW_AE_RESET_SENT;
1178 iwqp->ibqp_state = IB_QPS_ERR;
1179 spin_unlock_irqrestore(&iwqp->lock, flags);
1180 i40iw_cm_disconn(iwqp);
1181 } else {
1182 spin_unlock_irqrestore(&iwqp->lock, flags);
1183 }
1184 } else if (rem_node) {
1185 /* TIME_WAIT state */
1186 i40iw_rem_ref_cm_node(cm_node);
1187 }
1188 if (cm_id)
1189 cm_id->rem_ref(cm_id);
1190 kfree(close_entry);
1191 cm_node->close_entry = NULL;
1192}
1193
1194/**
1195 * i40iw_cm_timer_tick - system's timer expired callback
1196 * @pass: Pointing to cm_core
1197 */
1198static void i40iw_cm_timer_tick(unsigned long pass)
1199{
1200 unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
1201 struct i40iw_cm_node *cm_node;
1202 struct i40iw_timer_entry *send_entry, *close_entry;
1203 struct list_head *list_core_temp;
1204 struct list_head *list_node;
1205 struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
1206 u32 settimer = 0;
1207 unsigned long timetosend;
1208 struct i40iw_sc_dev *dev;
1209 unsigned long flags;
1210
1211 struct list_head timer_list;
1212
1213 INIT_LIST_HEAD(&timer_list);
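 /* first pass: under ht_lock, collect nodes with pending timers and take a reference on each */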
1214 spin_lock_irqsave(&cm_core->ht_lock, flags);
1215
1216 list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
1217 cm_node = container_of(list_node, struct i40iw_cm_node, list);
1218 if (cm_node->close_entry || cm_node->send_entry) {
1219 atomic_inc(&cm_node->ref_count);
1220 list_add(&cm_node->timer_entry, &timer_list);
1221 }
1222 }
1223 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1224
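 /* second pass: service close and retransmit timers outside ht_lock */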
1225 list_for_each_safe(list_node, list_core_temp, &timer_list) {
1226 cm_node = container_of(list_node,
1227 struct i40iw_cm_node,
1228 timer_entry);
1229 close_entry = cm_node->close_entry;
1230
1231 if (close_entry) {
1232 if (time_after(close_entry->timetosend, jiffies)) {
1233 if (nexttimeout > close_entry->timetosend ||
1234 !settimer) {
1235 nexttimeout = close_entry->timetosend;
1236 settimer = 1;
1237 }
1238 } else {
1239 i40iw_handle_close_entry(cm_node, 1);
1240 }
1241 }
1242
1243 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1244
1245 send_entry = cm_node->send_entry;
1246 if (!send_entry)
1247 goto done;
1248 if (time_after(send_entry->timetosend, jiffies)) {
1249 if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
1250 if ((nexttimeout > send_entry->timetosend) ||
1251 !settimer) {
1252 nexttimeout = send_entry->timetosend;
1253 settimer = 1;
1254 }
1255 } else {
1256 i40iw_free_retrans_entry(cm_node);
1257 }
1258 goto done;
1259 }
1260
1261 if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
1262 (cm_node->state == I40IW_CM_STATE_CLOSED)) {
1263 i40iw_free_retrans_entry(cm_node);
1264 goto done;
1265 }
1266
1267 if (!send_entry->retranscount || !send_entry->retrycount) {
1268 i40iw_free_retrans_entry(cm_node);
1269
1270 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1271 i40iw_retrans_expired(cm_node);
1272 cm_node->state = I40IW_CM_STATE_CLOSED;
1273 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1274 goto done;
1275 }
1276 cm_node->cm_core->stats_pkt_retrans++;
1277 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1278
1279 dev = cm_node->dev;
1280 atomic_inc(&send_entry->sqbuf->refcount);
1281 i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf);
1282 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1283 if (send_entry->send_retrans) {
1284 send_entry->retranscount--;
1285 timetosend = (I40IW_RETRY_TIMEOUT <<
1286 (I40IW_DEFAULT_RETRANS -
1287 send_entry->retranscount));
1288
1289 send_entry->timetosend = jiffies +
1290 min(timetosend, I40IW_MAX_TIMEOUT);
1291 if (nexttimeout > send_entry->timetosend || !settimer) {
1292 nexttimeout = send_entry->timetosend;
1293 settimer = 1;
1294 }
1295 } else {
1296 int close_when_complete;
1297
1298 close_when_complete = send_entry->close_when_complete;
1299 i40iw_debug(cm_node->dev,
1300 I40IW_DEBUG_CM,
1301 "cm_node=%p state=%d\n",
1302 cm_node,
1303 cm_node->state);
1304 i40iw_free_retrans_entry(cm_node);
1305 if (close_when_complete)
1306 i40iw_rem_ref_cm_node(cm_node);
1307 }
1308done:
1309 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1310 i40iw_rem_ref_cm_node(cm_node);
1311 }
1312
1313 if (settimer) {
1314 spin_lock_irqsave(&cm_core->ht_lock, flags);
1315 if (!timer_pending(&cm_core->tcp_timer)) {
1316 cm_core->tcp_timer.expires = nexttimeout;
1317 add_timer(&cm_core->tcp_timer);
1318 }
1319 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1320 }
1321}
1322
1323/**
1324 * i40iw_send_syn - send SYN packet
1325 * @cm_node: connection's node
1326 * @sendack: flag to set ACK bit or not
1327 */
1328int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
1329{
1330 struct i40iw_puda_buf *sqbuf;
1331 int flags = SET_SYN;
1332 char optionsbuffer[sizeof(struct option_mss) +
1333 sizeof(struct option_windowscale) +
1334 sizeof(struct option_base) + TCP_OPTIONS_PADDING];
1335 struct i40iw_kmem_info opts;
1336
1337 int optionssize = 0;
1338 /* Sending MSS option */
1339 union all_known_options *options;
1340
1341 opts.addr = optionsbuffer;
1342 if (!cm_node) {
1343 i40iw_pr_err("no cm_node\n");
1344 return -EINVAL;
1345 }
1346
1347 options = (union all_known_options *)&optionsbuffer[optionssize];
1348 options->as_mss.optionnum = OPTION_NUMBER_MSS;
1349 options->as_mss.length = sizeof(struct option_mss);
1350 options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
1351 optionssize += sizeof(struct option_mss);
1352
1353 options = (union all_known_options *)&optionsbuffer[optionssize];
1354 options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
1355 options->as_windowscale.length = sizeof(struct option_windowscale);
1356 options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
1357 optionssize += sizeof(struct option_windowscale);
1358 options = (union all_known_options *)&optionsbuffer[optionssize];
1359 options->as_end = OPTION_NUMBER_END;
1360 optionssize += 1;
1361
1362 if (sendack)
1363 flags |= SET_ACK;
1364
1365 opts.size = optionssize;
1366
1367 sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);
1368 if (!sqbuf) {
1369 i40iw_pr_err("no sqbuf\n");
1370 return -1;
1371 }
1372 return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
1373}
1374
1375/**
1376 * i40iw_send_ack - Send ACK packet
1377 * @cm_node: connection's node
1378 */
1379static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
1380{
1381 struct i40iw_puda_buf *sqbuf;
1382
1383 sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
1384 if (sqbuf)
1385 i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf);
1386 else
1387 i40iw_pr_err("no sqbuf\n");
1388}
1389
1390/**
1391 * i40iw_send_fin - Send FIN pkt
1392 * @cm_node: connection's node
1393 */
1394static int i40iw_send_fin(struct i40iw_cm_node *cm_node)
1395{
1396 struct i40iw_puda_buf *sqbuf;
1397
1398 sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);
1399 if (!sqbuf) {
1400 i40iw_pr_err("no sqbuf\n");
1401 return -1;
1402 }
1403 return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
1404}
1405
1406/**
1407 * i40iw_find_node - find a cm node that matches the reference cm node
1408 * @cm_core: cm's core
1409 * @rem_port: remote tcp port num
1410 * @rem_addr: remote ip addr
1411 * @loc_port: local tcp port num
1412 * @loc_addr: loc ip addr
1413 * @add_refcnt: flag to increment refcount of cm_node
1414 */
1415struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
1416 u16 rem_port,
1417 u32 *rem_addr,
1418 u16 loc_port,
1419 u32 *loc_addr,
1420 bool add_refcnt)
1421{
1422 struct list_head *hte;
1423 struct i40iw_cm_node *cm_node;
1424 unsigned long flags;
1425
1426 hte = &cm_core->connected_nodes;
1427
1428 /* walk list and find cm_node associated with this session ID */
1429 spin_lock_irqsave(&cm_core->ht_lock, flags);
1430 list_for_each_entry(cm_node, hte, list) {
1431 if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
1432 (cm_node->loc_port == loc_port) &&
1433 !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&
1434 (cm_node->rem_port == rem_port)) {
1435 if (add_refcnt)
1436 atomic_inc(&cm_node->ref_count);
1437 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1438 return cm_node;
1439 }
1440 }
1441 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1442
1443 /* no owner node */
1444 return NULL;
1445}
1446
1447/**
1448 * i40iw_find_listener - find a cm node listening on this addr-port pair
1449 * @cm_core: cm's core
1450 * @dst_port: listener tcp port num
1451 * @dst_addr: listener ip addr
1452 * @listener_state: state to match with listen node's
1453 */
1454static struct i40iw_cm_listener *i40iw_find_listener(
1455 struct i40iw_cm_core *cm_core,
1456 u32 *dst_addr,
1457 u16 dst_port,
1458 u16 vlan_id,
1459 enum i40iw_cm_listener_state
1460 listener_state)
1461{
1462 struct i40iw_cm_listener *listen_node;
1463 static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1464 u32 listen_addr[4];
1465 u16 listen_port;
1466 unsigned long flags;
1467
1468 /* walk list and find cm_node associated with this session ID */
1469 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
1470 list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
1471 memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
1472 listen_port = listen_node->loc_port;
1473 /* compare node pair, return node handle if a match */
1474 if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
1475 !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
1476 (listen_port == dst_port) &&
1477 (listener_state & listen_node->listener_state)) {
1478 atomic_inc(&listen_node->ref_count);
1479 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1480 return listen_node;
1481 }
1482 }
1483 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1484 return NULL;
1485}
1486
1487/**
1488 * i40iw_add_hte_node - add a cm node to the hash table
1489 * @cm_core: cm's core
1490 * @cm_node: connection's node
1491 */
1492static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
1493 struct i40iw_cm_node *cm_node)
1494{
1495 struct list_head *hte;
1496 unsigned long flags;
1497
1498 if (!cm_node || !cm_core) {
1499 i40iw_pr_err("cm_node or cm_core == NULL\n");
1500 return;
1501 }
1502 spin_lock_irqsave(&cm_core->ht_lock, flags);
1503
1504 /* get a handle on the hash table element (list head for this slot) */
1505 hte = &cm_core->connected_nodes;
1506 list_add_tail(&cm_node->list, hte);
1507 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1508}
1509
1510/**
1511 * i40iw_listen_port_in_use - determine if port is in use
1512 * @port: Listen port number
1513 */
1514static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
1515{
1516 struct i40iw_cm_listener *listen_node;
1517 unsigned long flags;
1518 bool ret = false;
1519
1520 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
1521 list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
1522 if (listen_node->loc_port == port) {
1523 ret = true;
1524 break;
1525 }
1526 }
1527 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1528 return ret;
1529}
1530
1531/**
1532 * i40iw_del_multiple_qhash - Remove qhash and child listeners
1533 * @iwdev: iWarp device
1534 * @cm_info: CM info for parent listen node
1535 * @cm_parent_listen_node: The parent listen node
1536 */
1537static enum i40iw_status_code i40iw_del_multiple_qhash(
1538 struct i40iw_device *iwdev,
1539 struct i40iw_cm_info *cm_info,
1540 struct i40iw_cm_listener *cm_parent_listen_node)
1541{
1542 struct i40iw_cm_listener *child_listen_node;
1543 enum i40iw_status_code ret = I40IW_ERR_CONFIG;
1544 struct list_head *pos, *tpos;
1545 unsigned long flags;
1546
1547 spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
1548 list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
1549 child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
1550 if (child_listen_node->ipv4)
1551 i40iw_debug(&iwdev->sc_dev,
1552 I40IW_DEBUG_CM,
1553 "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
1554 child_listen_node->loc_addr,
1555 child_listen_node->loc_port,
1556 child_listen_node->vlan_id);
1557 else
1558 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
1559 "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
1560 child_listen_node->loc_addr,
1561 child_listen_node->loc_port,
1562 child_listen_node->vlan_id);
1563 list_del(pos);
1564 memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
1565 sizeof(cm_info->loc_addr));
1566 cm_info->vlan_id = child_listen_node->vlan_id;
1567 ret = i40iw_manage_qhash(iwdev, cm_info,
1568 I40IW_QHASH_TYPE_TCP_SYN,
1569 I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
1570 kfree(child_listen_node);
1571 cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
1572 i40iw_debug(&iwdev->sc_dev,
1573 I40IW_DEBUG_CM,
1574 "freed pointer = %p\n",
1575 child_listen_node);
1576 }
1577 spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
1578
1579 return ret;
1580}
1581
1582/**
1583 * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac
1584 * @addr: local IPv6 address
1585 * @vlan_id: vlan id for the given IPv6 address
1586 * @mac: mac address for the given IPv6 address
1587 *
1588 * Returns the net_device of the IPv6 address and also sets the
1589 * vlan id and mac for that address.
1590 */
1591static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
1592{
1593 struct net_device *ip_dev = NULL;
1594#if IS_ENABLED(CONFIG_IPV6)
1595 struct in6_addr laddr6;
1596
1597 i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
1598 if (vlan_id)
1599 *vlan_id = I40IW_NO_VLAN;
1600 if (mac)
1601 eth_zero_addr(mac);
1602 rcu_read_lock();
1603 for_each_netdev_rcu(&init_net, ip_dev) {
1604 if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
1605 if (vlan_id)
1606 *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
1607 if (ip_dev->dev_addr && mac)
1608 ether_addr_copy(mac, ip_dev->dev_addr);
1609 break;
1610 }
1611 }
1612 rcu_read_unlock();
1613#endif
1614 return ip_dev;
1615}
1616
1617/**
1618 * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
1619 * @addr: local IPv4 address
1620 */
1621static u16 i40iw_get_vlan_ipv4(u32 *addr)
1622{
1623 struct net_device *netdev;
1624 u16 vlan_id = I40IW_NO_VLAN;
1625
1626 netdev = ip_dev_find(&init_net, htonl(addr[0]));
1627 if (netdev) {
1628 vlan_id = rdma_vlan_dev_vlan_id(netdev);
1629 dev_put(netdev);
1630 }
1631 return vlan_id;
1632}
1633
1634/**
1635 * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
1636 * @iwdev: iWarp device
1637 * @cm_info: CM info for parent listen node
1638 * @cm_parent_listen_node: The parent listen node
1639 *
1640 * Adds a qhash and a child listen node for every IPv6 address
1641 * on the adapter and adds the associated qhash filter
1642 */
1643static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
1644 struct i40iw_cm_info *cm_info,
1645 struct i40iw_cm_listener *cm_parent_listen_node)
1646{
1647 struct net_device *ip_dev;
1648 struct inet6_dev *idev;
1649 struct inet6_ifaddr *ifp;
1650 enum i40iw_status_code ret = 0;
1651 struct i40iw_cm_listener *child_listen_node;
1652 unsigned long flags;
1653
1654 rtnl_lock();
1655 for_each_netdev_rcu(&init_net, ip_dev) {
1656 if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
1657 (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
1658 (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
1659 idev = __in6_dev_get(ip_dev);
1660 if (!idev) {
1661 i40iw_pr_err("idev == NULL\n");
1662 break;
1663 }
1664 list_for_each_entry(ifp, &idev->addr_list, if_list) {
1665 i40iw_debug(&iwdev->sc_dev,
1666 I40IW_DEBUG_CM,
1667 "IP=%pI6, vlan_id=%d, MAC=%pM\n",
1668 &ifp->addr,
1669 rdma_vlan_dev_vlan_id(ip_dev),
1670 ip_dev->dev_addr);
1671 child_listen_node =
1672 kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
1673 i40iw_debug(&iwdev->sc_dev,
1674 I40IW_DEBUG_CM,
1675 "Allocating child listener %p\n",
1676 child_listen_node);
1677 if (!child_listen_node) {
1678 i40iw_pr_err("listener memory allocation\n");
1679 ret = I40IW_ERR_NO_MEMORY;
1680 goto exit;
1681 }
1682 cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
1683 cm_parent_listen_node->vlan_id = cm_info->vlan_id;
1684
1685 memcpy(child_listen_node, cm_parent_listen_node,
1686 sizeof(*child_listen_node));
1687
1688 i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
1689 ifp->addr.in6_u.u6_addr32);
1690 memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
1691 sizeof(cm_info->loc_addr));
1692
1693 ret = i40iw_manage_qhash(iwdev, cm_info,
1694 I40IW_QHASH_TYPE_TCP_SYN,
1695 I40IW_QHASH_MANAGE_TYPE_ADD,
1696 NULL, true);
1697 if (!ret) {
1698 spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
1699 list_add(&child_listen_node->child_listen_list,
1700 &cm_parent_listen_node->child_listen_list);
1701 spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
1702 cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
1703 } else {
1704 kfree(child_listen_node);
1705 }
1706 }
1707 }
1708 }
1709exit:
1710 rtnl_unlock();
1711 return ret;
1712}
1713
1714/**
1715 * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4
1716 * @iwdev: iWarp device
1717 * @cm_info: CM info for parent listen node
1718 * @cm_parent_listen_node: The parent listen node
1719 *
1720 * Adds a qhash and a child listen node for every IPv4 address
1721 * on the adapter and adds the associated qhash filter
1722 */
1723static enum i40iw_status_code i40iw_add_mqh_4(
1724 struct i40iw_device *iwdev,
1725 struct i40iw_cm_info *cm_info,
1726 struct i40iw_cm_listener *cm_parent_listen_node)
1727{
1728 struct net_device *dev;
1729 struct in_device *idev;
1730 struct i40iw_cm_listener *child_listen_node;
1731 enum i40iw_status_code ret = 0;
1732 unsigned long flags;
1733
1734 rtnl_lock();
1735 for_each_netdev(&init_net, dev) {
1736 if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
1737 (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
1738 (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
1739 idev = in_dev_get(dev);
1740 for_ifa(idev) {
1741 i40iw_debug(&iwdev->sc_dev,
1742 I40IW_DEBUG_CM,
1743 "Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
1744 &ifa->ifa_address,
1745 rdma_vlan_dev_vlan_id(dev),
1746 dev->dev_addr);
1747 child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
1748 cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
1749 i40iw_debug(&iwdev->sc_dev,
1750 I40IW_DEBUG_CM,
1751 "Allocating child listener %p\n",
1752 child_listen_node);
1753 if (!child_listen_node) {
1754 i40iw_pr_err("listener memory allocation\n");
1755 in_dev_put(idev);
1756 ret = I40IW_ERR_NO_MEMORY;
1757 goto exit;
1758 }
1759 cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
1760 cm_parent_listen_node->vlan_id = cm_info->vlan_id;
1761 memcpy(child_listen_node,
1762 cm_parent_listen_node,
1763 sizeof(*child_listen_node));
1764
1765 child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
1766 memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
1767 sizeof(cm_info->loc_addr));
1768
1769 ret = i40iw_manage_qhash(iwdev,
1770 cm_info,
1771 I40IW_QHASH_TYPE_TCP_SYN,
1772 I40IW_QHASH_MANAGE_TYPE_ADD,
1773 NULL,
1774 true);
1775 if (!ret) {
1776 spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
1777 list_add(&child_listen_node->child_listen_list,
1778 &cm_parent_listen_node->child_listen_list);
1779 spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
1780 } else {
1781 kfree(child_listen_node);
1782 cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
1783 }
1784 }
1785 endfor_ifa(idev);
1786 in_dev_put(idev);
1787 }
1788 }
1789exit:
1790 rtnl_unlock();
1791 return ret;
1792}
1793
1794/**
1795 * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
1796 * @cm_core: cm's core
1797 * @free_hanging_nodes: to free associated cm_nodes
1798 * @apbvt_del: flag to delete the apbvt
1799 */
1800static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
1801 struct i40iw_cm_listener *listener,
1802 int free_hanging_nodes, bool apbvt_del)
1803{
1804 int ret = -EINVAL;
1805 int err = 0;
1806 struct list_head *list_pos;
1807 struct list_head *list_temp;
1808 struct i40iw_cm_node *cm_node;
1809 struct list_head reset_list;
1810 struct i40iw_cm_info nfo;
1811 struct i40iw_cm_node *loopback;
1812 enum i40iw_cm_node_state old_state;
1813 unsigned long flags;
1814
1815 /* free non-accelerated child nodes for this listener */
1816 INIT_LIST_HEAD(&reset_list);
1817 if (free_hanging_nodes) {
1818 spin_lock_irqsave(&cm_core->ht_lock, flags);
1819 list_for_each_safe(list_pos, list_temp, &cm_core->connected_nodes) {
1820 cm_node = container_of(list_pos, struct i40iw_cm_node, list);
1821 if ((cm_node->listener == listener) && !cm_node->accelerated) {
1822 atomic_inc(&cm_node->ref_count);
1823 list_add(&cm_node->reset_entry, &reset_list);
1824 }
1825 }
1826 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1827 }
1828
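 /* tear down each child connection collected above */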
1829 list_for_each_safe(list_pos, list_temp, &reset_list) {
1830 cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
1831 loopback = cm_node->loopbackpartner;
1832 if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
1833 i40iw_rem_ref_cm_node(cm_node);
1834 } else {
1835 if (!loopback) {
1836 i40iw_cleanup_retrans_entry(cm_node);
1837 err = i40iw_send_reset(cm_node);
1838 if (err) {
1839 cm_node->state = I40IW_CM_STATE_CLOSED;
1840 i40iw_pr_err("send reset\n");
1841 } else {
1842 old_state = cm_node->state;
1843 cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
1844 if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
1845 i40iw_rem_ref_cm_node(cm_node);
1846 }
1847 } else {
1848 struct i40iw_cm_event event;
1849
1850 event.cm_node = loopback;
1851 memcpy(event.cm_info.rem_addr,
1852 loopback->rem_addr, sizeof(event.cm_info.rem_addr));
1853 memcpy(event.cm_info.loc_addr,
1854 loopback->loc_addr, sizeof(event.cm_info.loc_addr));
1855 event.cm_info.rem_port = loopback->rem_port;
1856 event.cm_info.loc_port = loopback->loc_port;
1857 event.cm_info.cm_id = loopback->cm_id;
1858 event.cm_info.ipv4 = loopback->ipv4;
1859 atomic_inc(&loopback->ref_count);
1860 loopback->state = I40IW_CM_STATE_CLOSED;
1861 i40iw_event_connect_error(&event);
1862 cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
1863 i40iw_rem_ref_cm_node(cm_node);
1864 }
1865 }
1866 }
1867
1868 if (!atomic_dec_return(&listener->ref_count)) {
1869 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
1870 list_del(&listener->list);
1871 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1872
1873 if (listener->iwdev) {
1874 if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
1875 i40iw_manage_apbvt(listener->iwdev,
1876 listener->loc_port,
1877 I40IW_MANAGE_APBVT_DEL);
1878
1879 memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
1880 nfo.loc_port = listener->loc_port;
1881 nfo.ipv4 = listener->ipv4;
1882 nfo.vlan_id = listener->vlan_id;
1883
1884 if (!list_empty(&listener->child_listen_list)) {
1885 i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
1886 } else {
1887 if (listener->qhash_set)
1888 i40iw_manage_qhash(listener->iwdev,
1889 &nfo,
1890 I40IW_QHASH_TYPE_TCP_SYN,
1891 I40IW_QHASH_MANAGE_TYPE_DELETE,
1892 NULL,
1893 false);
1894 }
1895 }
1896
1897 cm_core->stats_listen_destroyed++;
1898 kfree(listener);
1899 cm_core->stats_listen_nodes_destroyed++;
1900 listener = NULL;
1901 ret = 0;
1902 }
1903
1904 if (listener) {
1905 if (atomic_read(&listener->pend_accepts_cnt) > 0)
1906 i40iw_debug(cm_core->dev,
1907 I40IW_DEBUG_CM,
1908 "%s: listener (%p) pending accepts=%u\n",
1909 __func__,
1910 listener,
1911 atomic_read(&listener->pend_accepts_cnt));
1912 }
1913
1914 return ret;
1915}
1916
1917/**
1918 * i40iw_cm_del_listen - delete a listener
1919 * @cm_core: cm's core
1920 * @listener: passive connection's listener
1921 * @apbvt_del: flag to delete apbvt
1922 */
1923static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
1924 struct i40iw_cm_listener *listener,
1925 bool apbvt_del)
1926{
1927 listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
1928 listener->cm_id = NULL; /* going to be destroyed pretty soon */
1929 return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
1930}
1931
1932/**
1933 * i40iw_addr_resolve_neigh - resolve neighbor address
1934 * @iwdev: iwarp device structure
1935 * @src_ip: local ip address
1936 * @dst_ip: remote ip address
1937 * @arpindex: index of an existing arp entry, if any
1938 */
1939static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
1940 u32 src_ip,
1941 u32 dst_ip,
1942 int arpindex)
1943{
1944 struct rtable *rt;
1945 struct neighbour *neigh;
1946 int rc = arpindex;
1947 struct net_device *netdev = iwdev->netdev;
1948 __be32 dst_ipaddr = htonl(dst_ip);
1949 __be32 src_ipaddr = htonl(src_ip);
1950
1951 rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
1952 if (IS_ERR(rt)) {
1953 i40iw_pr_err("ip_route_output\n");
1954 return rc;
1955 }
1956
1957 if (netif_is_bond_slave(netdev))
1958 netdev = netdev_master_upper_dev_get(netdev);
1959
1960 neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
1961
1962 rcu_read_lock();
1963 if (neigh) {
1964 if (neigh->nud_state & NUD_VALID) {
1965 if (arpindex >= 0) {
1966 if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
1967 neigh->ha))
1968 /* Mac address same as arp table */
1969 goto resolve_neigh_exit;
1970 i40iw_manage_arp_cache(iwdev,
1971 iwdev->arp_table[arpindex].mac_addr,
1972 &dst_ip,
1973 true,
1974 I40IW_ARP_DELETE);
1975 }
1976
1977 i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
1978 rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
1979 } else {
1980 neigh_event_send(neigh, NULL);
1981 }
1982 }
1983 resolve_neigh_exit:
1984
1985 rcu_read_unlock();
1986 if (neigh)
1987 neigh_release(neigh);
1988
1989 ip_rt_put(rt);
1990 return rc;
1991}
1992
1993/**
1994 * i40iw_get_dst_ipv6 - get the route for an IPv6 source/destination pair
 * @src_addr: local ipv6 sockaddr
 * @dst_addr: remote ipv6 sockaddr
1995 */
1996static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
1997 struct sockaddr_in6 *dst_addr)
1998{
1999 struct dst_entry *dst;
2000 struct flowi6 fl6;
2001
2002 memset(&fl6, 0, sizeof(fl6));
2003 fl6.daddr = dst_addr->sin6_addr;
2004 fl6.saddr = src_addr->sin6_addr;
2005 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
2006 fl6.flowi6_oif = dst_addr->sin6_scope_id;
2007
2008 dst = ip6_route_output(&init_net, NULL, &fl6);
2009 return dst;
2010}
2011
2012/**
2013 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
2014 * @iwdev: iwarp device structure
2015 * @src: local ipv6 address
 * @dest: remote ipv6 address
2016 * @arpindex: index of an existing arp entry, if any
2017 */
2018static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2019 u32 *src,
2020 u32 *dest,
2021 int arpindex)
2022{
2023 struct neighbour *neigh;
2024 int rc = arpindex;
2025 struct net_device *netdev = iwdev->netdev;
2026 struct dst_entry *dst;
2027 struct sockaddr_in6 dst_addr;
2028 struct sockaddr_in6 src_addr;
2029
2030 memset(&dst_addr, 0, sizeof(dst_addr));
2031 dst_addr.sin6_family = AF_INET6;
2032 i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
2033 memset(&src_addr, 0, sizeof(src_addr));
2034 src_addr.sin6_family = AF_INET6;
2035 i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
2036 dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
2037 if (!dst || dst->error) {
2038 if (dst) {
2039 i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
2040 dst->error);
2041 dst_release(dst);
2042 }
2043 return rc;
2044 }
2045
2046 if (netif_is_bond_slave(netdev))
2047 netdev = netdev_master_upper_dev_get(netdev);
2048
2049 neigh = dst_neigh_lookup(dst, &dst_addr);
2050
2051 rcu_read_lock();
2052 if (neigh) {
2053 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha);
2054 if (neigh->nud_state & NUD_VALID) {
2055 if (arpindex >= 0) {
2056 if (ether_addr_equal
2057 (iwdev->arp_table[arpindex].mac_addr,
2058 neigh->ha)) {
2059 /* Mac address same as in arp table */
2060 goto resolve_neigh_exit6;
2061 }
2062 i40iw_manage_arp_cache(iwdev,
2063 iwdev->arp_table[arpindex].mac_addr,
2064 dest,
2065 false,
2066 I40IW_ARP_DELETE);
2067 }
2068 i40iw_manage_arp_cache(iwdev,
2069 neigh->ha,
2070 dest,
2071 false,
2072 I40IW_ARP_ADD);
2073 rc = i40iw_arp_table(iwdev,
2074 dest,
2075 false,
2076 NULL,
2077 I40IW_ARP_RESOLVE);
2078 } else {
2079 neigh_event_send(neigh, NULL);
2080 }
2081 }
2082
2083 resolve_neigh_exit6:
2084 rcu_read_unlock();
2085 if (neigh)
2086 neigh_release(neigh);
2087 dst_release(dst);
2088 return rc;
2089}
2090
2091/**
2092 * i40iw_ipv4_is_loopback - check if loopback
2093 * @loc_addr: local addr to compare
2094 * @rem_addr: remote address
2095 */
2096static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)
2097{
2098 return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
2099}
2100
2101/**
2102 * i40iw_ipv6_is_loopback - check if loopback
2103 * @loc_addr: local addr to compare
2104 * @rem_addr: remote address
2105 */
2106static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
2107{
2108 struct in6_addr raddr6;
2109
2110 i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
2111 return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
2112}
2113
2114/**
2115 * i40iw_make_cm_node - create a new instance of a cm node
2116 * @cm_core: cm's core
2117 * @iwdev: iwarp device structure
2118 * @cm_info: quad info for connection
2119 * @listener: passive connection's listener
2120 */
2121static struct i40iw_cm_node *i40iw_make_cm_node(
2122 struct i40iw_cm_core *cm_core,
2123 struct i40iw_device *iwdev,
2124 struct i40iw_cm_info *cm_info,
2125 struct i40iw_cm_listener *listener)
2126{
2127 struct i40iw_cm_node *cm_node;
2128 struct timespec ts;
2129 int oldarpindex;
2130 int arpindex;
2131 struct net_device *netdev = iwdev->netdev;
2132
2133 /* create an hte and cm_node for this instance */
2134 cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
2135 if (!cm_node)
2136 return NULL;
2137
2138 /* set our node specific transport info */
2139 cm_node->ipv4 = cm_info->ipv4;
2140 cm_node->vlan_id = cm_info->vlan_id;
2141 memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
2142 memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
2143 cm_node->loc_port = cm_info->loc_port;
2144 cm_node->rem_port = cm_info->rem_port;
2145
2146 cm_node->mpa_frame_rev = iwdev->mpa_version;
2147 cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
2148 cm_node->ird_size = I40IW_MAX_IRD_SIZE;
2149 cm_node->ord_size = I40IW_MAX_ORD_SIZE;
2150
2151 cm_node->listener = listener;
2152 cm_node->cm_id = cm_info->cm_id;
2153 ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
2154 spin_lock_init(&cm_node->retrans_list_lock);
2155
2156 atomic_set(&cm_node->ref_count, 1);
2157 /* associate our parent CM core */
2158 cm_node->cm_core = cm_core;
2159 cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
2160 cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
2161 cm_node->tcp_cntxt.rcv_wnd =
2162 I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
2163 ts = current_kernel_time();
2164 cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
2165 cm_node->tcp_cntxt.mss = iwdev->mss;
2166
2167 cm_node->iwdev = iwdev;
2168 cm_node->dev = &iwdev->sc_dev;
2169
2170 if ((cm_node->ipv4 &&
2171 i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
2172 (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
2173 cm_node->rem_addr))) {
2174 arpindex = i40iw_arp_table(iwdev,
2175 cm_node->rem_addr,
2176 false,
2177 NULL,
2178 I40IW_ARP_RESOLVE);
2179 } else {
2180 oldarpindex = i40iw_arp_table(iwdev,
2181 cm_node->rem_addr,
2182 false,
2183 NULL,
2184 I40IW_ARP_RESOLVE);
2185 if (cm_node->ipv4)
2186 arpindex = i40iw_addr_resolve_neigh(iwdev,
2187 cm_info->loc_addr[0],
2188 cm_info->rem_addr[0],
2189 oldarpindex);
2190 else if (IS_ENABLED(CONFIG_IPV6))
2191 arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
2192 cm_info->loc_addr,
2193 cm_info->rem_addr,
2194 oldarpindex);
2195 else
2196 arpindex = -EINVAL;
2197 }
2198 if (arpindex < 0) {
2199 i40iw_pr_err("cm_node arpindex\n");
2200 kfree(cm_node);
2201 return NULL;
2202 }
2203 ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
2204 i40iw_add_hte_node(cm_core, cm_node);
2205 cm_core->stats_nodes_created++;
2206 return cm_node;
2207}
2208
2209/**
2210 * i40iw_rem_ref_cm_node - destroy an instance of a cm node
2211 * @cm_node: connection's node
2212 */
2213static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
2214{
2215 struct i40iw_cm_core *cm_core = cm_node->cm_core;
2216 struct i40iw_qp *iwqp;
2217 struct i40iw_cm_info nfo;
2218 unsigned long flags;
2219
2220 spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
2221 if (atomic_dec_return(&cm_node->ref_count)) {
2222 spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
2223 return;
2224 }
2225 list_del(&cm_node->list);
2226 spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
2227
2228 /* if the node is destroyed before connection was accelerated */
2229 if (!cm_node->accelerated && cm_node->accept_pend) {
2230 pr_err("node destroyed before established\n");
2231 atomic_dec(&cm_node->listener->pend_accepts_cnt);
2232 }
2233 if (cm_node->close_entry)
2234 i40iw_handle_close_entry(cm_node, 0);
2235 if (cm_node->listener) {
2236 i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
2237 } else {
2238 if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
2239 cm_node->apbvt_set && cm_node->iwdev) {
2240 i40iw_manage_apbvt(cm_node->iwdev,
2241 cm_node->loc_port,
2242 I40IW_MANAGE_APBVT_DEL);
2243 i40iw_get_addr_info(cm_node, &nfo);
2244 if (cm_node->qhash_set) {
2245 i40iw_manage_qhash(cm_node->iwdev,
2246 &nfo,
2247 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
2248 I40IW_QHASH_MANAGE_TYPE_DELETE,
2249 NULL,
2250 false);
2251 cm_node->qhash_set = 0;
2252 }
2253 }
2254 }
2255
2256 iwqp = cm_node->iwqp;
2257 if (iwqp) {
2258 iwqp->cm_node = NULL;
2259 i40iw_rem_ref(&iwqp->ibqp);
2260 cm_node->iwqp = NULL;
2261 } else if (cm_node->qhash_set) {
2262 i40iw_get_addr_info(cm_node, &nfo);
2263 i40iw_manage_qhash(cm_node->iwdev,
2264 &nfo,
2265 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
2266 I40IW_QHASH_MANAGE_TYPE_DELETE,
2267 NULL,
2268 false);
2269 cm_node->qhash_set = 0;
2270 }
2271
2272 cm_node->cm_core->stats_nodes_destroyed++;
2273 kfree(cm_node);
2274}
2275
2276/**
2277 * i40iw_handle_fin_pkt - FIN packet received
2278 * @cm_node: connection's node
2279 */
2280static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
2281{
2282 u32 ret;
2283
2284 switch (cm_node->state) {
2285 case I40IW_CM_STATE_SYN_RCVD:
2286 case I40IW_CM_STATE_SYN_SENT:
2287 case I40IW_CM_STATE_ESTABLISHED:
2288 case I40IW_CM_STATE_MPAREJ_RCVD:
2289 cm_node->tcp_cntxt.rcv_nxt++;
2290 i40iw_cleanup_retrans_entry(cm_node);
2291 cm_node->state = I40IW_CM_STATE_LAST_ACK;
2292 i40iw_send_fin(cm_node);
2293 break;
2294 case I40IW_CM_STATE_MPAREQ_SENT:
2295 i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
2296 cm_node->tcp_cntxt.rcv_nxt++;
2297 i40iw_cleanup_retrans_entry(cm_node);
2298 cm_node->state = I40IW_CM_STATE_CLOSED;
2299 atomic_inc(&cm_node->ref_count);
2300 i40iw_send_reset(cm_node);
2301 break;
2302 case I40IW_CM_STATE_FIN_WAIT1:
2303 cm_node->tcp_cntxt.rcv_nxt++;
2304 i40iw_cleanup_retrans_entry(cm_node);
2305 cm_node->state = I40IW_CM_STATE_CLOSING;
2306 i40iw_send_ack(cm_node);
2307 /*
2308 * Wait for ACK as this is simultaneous close.
2309 * After we receive ACK, do not send anything.
2310 * Just rm the node.
2311 */
2312 break;
2313 case I40IW_CM_STATE_FIN_WAIT2:
2314 cm_node->tcp_cntxt.rcv_nxt++;
2315 i40iw_cleanup_retrans_entry(cm_node);
2316 cm_node->state = I40IW_CM_STATE_TIME_WAIT;
2317 i40iw_send_ack(cm_node);
2318 ret =
2319 i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0);
2320 if (ret)
2321 i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
2322 break;
2323 case I40IW_CM_STATE_TIME_WAIT:
2324 cm_node->tcp_cntxt.rcv_nxt++;
2325 i40iw_cleanup_retrans_entry(cm_node);
2326 cm_node->state = I40IW_CM_STATE_CLOSED;
2327 i40iw_rem_ref_cm_node(cm_node);
2328 break;
2329 case I40IW_CM_STATE_OFFLOADED:
2330 default:
2331 i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
2332 break;
2333 }
2334}
2335
2336/**
2337 * i40iw_handle_rst_pkt - process received RST packet
2338 * @cm_node: connection's node
2339 * @rbuf: receive buffer
2340 */
2341static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
2342 struct i40iw_puda_buf *rbuf)
2343{
2344 i40iw_cleanup_retrans_entry(cm_node);
2345 switch (cm_node->state) {
2346 case I40IW_CM_STATE_SYN_SENT:
2347 case I40IW_CM_STATE_MPAREQ_SENT:
2348 switch (cm_node->mpa_frame_rev) {
2349 case IETF_MPA_V2:
2350 cm_node->mpa_frame_rev = IETF_MPA_V1;
2351 /* send a syn and goto syn sent state */
2352 cm_node->state = I40IW_CM_STATE_SYN_SENT;
2353 if (i40iw_send_syn(cm_node, 0))
2354 i40iw_active_open_err(cm_node, false);
2355 break;
2356 case IETF_MPA_V1:
2357 default:
2358 i40iw_active_open_err(cm_node, false);
2359 break;
2360 }
2361 break;
2362 case I40IW_CM_STATE_MPAREQ_RCVD:
2363 atomic_add_return(1, &cm_node->passive_state);
2364 break;
2365 case I40IW_CM_STATE_ESTABLISHED:
2366 case I40IW_CM_STATE_SYN_RCVD:
2367 case I40IW_CM_STATE_LISTENING:
2368 i40iw_pr_err("Bad state state = %d\n", cm_node->state);
2369 i40iw_passive_open_err(cm_node, false);
2370 break;
2371 case I40IW_CM_STATE_OFFLOADED:
2372 i40iw_active_open_err(cm_node, false);
2373 break;
2374 case I40IW_CM_STATE_CLOSED:
2375 break;
2376 case I40IW_CM_STATE_FIN_WAIT2:
2377 case I40IW_CM_STATE_FIN_WAIT1:
2378 case I40IW_CM_STATE_LAST_ACK:
2379 cm_node->cm_id->rem_ref(cm_node->cm_id);
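/* fall through */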
2380 case I40IW_CM_STATE_TIME_WAIT:
2381 cm_node->state = I40IW_CM_STATE_CLOSED;
2382 i40iw_rem_ref_cm_node(cm_node);
2383 break;
2384 default:
2385 break;
2386 }
2387}
2388
2389/**
2390 * i40iw_handle_rcv_mpa - process a received MPA buffer
2391 * @cm_node: connection's node
2392 * @rbuf: receive buffer
2393 */
2394static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
2395 struct i40iw_puda_buf *rbuf)
2396{
2397 int ret;
2398 int datasize = rbuf->datalen;
2399 u8 *dataloc = rbuf->data;
2400
2401 enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
2402 u32 res_type;
2403
2404 ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
2405 if (ret) {
2406 if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
2407 i40iw_active_open_err(cm_node, true);
2408 else
2409 i40iw_passive_open_err(cm_node, true);
2410 return;
2411 }
2412
2413 switch (cm_node->state) {
2414 case I40IW_CM_STATE_ESTABLISHED:
2415 if (res_type == I40IW_MPA_REQUEST_REJECT)
2416 i40iw_pr_err("state for reject\n");
2417 cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
2418 type = I40IW_CM_EVENT_MPA_REQ;
2419 i40iw_send_ack(cm_node); /* ACK received MPA request */
2420 atomic_set(&cm_node->passive_state,
2421 I40IW_PASSIVE_STATE_INDICATED);
2422 break;
2423 case I40IW_CM_STATE_MPAREQ_SENT:
2424 i40iw_cleanup_retrans_entry(cm_node);
2425 if (res_type == I40IW_MPA_REQUEST_REJECT) {
2426 type = I40IW_CM_EVENT_MPA_REJECT;
2427 cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
2428 } else {
2429 type = I40IW_CM_EVENT_CONNECTED;
2430 cm_node->state = I40IW_CM_STATE_OFFLOADED;
2431 i40iw_send_ack(cm_node);
2432 }
2433 break;
2434 default:
2435 pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
2436 break;
2437 }
2438 i40iw_create_event(cm_node, type);
2439}
2440
2441/**
2442 * i40iw_indicate_pkt_err - Send up err event to cm
2443 * @cm_node: connection's node
2444 */
2445static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)
2446{
2447 switch (cm_node->state) {
2448 case I40IW_CM_STATE_SYN_SENT:
2449 case I40IW_CM_STATE_MPAREQ_SENT:
2450 i40iw_active_open_err(cm_node, true);
2451 break;
2452 case I40IW_CM_STATE_ESTABLISHED:
2453 case I40IW_CM_STATE_SYN_RCVD:
2454 i40iw_passive_open_err(cm_node, true);
2455 break;
2456 case I40IW_CM_STATE_OFFLOADED:
2457 default:
2458 break;
2459 }
2460}
2461
2462/**
2463 * i40iw_check_syn - Check for error on received syn ack
2464 * @cm_node: connection's node
2465 * @tcph: pointer tcp header
2466 */
2467static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
2468{
2469 int err = 0;
2470
2471 if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
2472 err = 1;
2473 i40iw_active_open_err(cm_node, true);
2474 }
2475 return err;
2476}
2477
2478/**
2479 * i40iw_check_seq - check seq numbers if OK
2480 * @cm_node: connection's node
2481 * @tcph: pointer tcp header
2482 */
2483static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
2484{
2485 int err = 0;
2486 u32 seq;
2487 u32 ack_seq;
2488 u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
2489 u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
2490 u32 rcv_wnd;
2491
2492 seq = ntohl(tcph->seq);
2493 ack_seq = ntohl(tcph->ack_seq);
2494 rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
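/*
 * Accept only segments that ack our current send sequence number
 * and whose sequence number falls within the receive window.
 */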
2495 if (ack_seq != loc_seq_num)
2496 err = -1;
2497 else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
2498 err = -1;
2499 if (err) {
2500 i40iw_pr_err("seq number\n");
2501 i40iw_indicate_pkt_err(cm_node);
2502 }
2503 return err;
2504}
2505
2506/**
2507 * i40iw_handle_syn_pkt - handle a SYN packet (passive side)
2508 * @cm_node: connection's node
2509 * @rbuf: receive buffer
2510 */
2511static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
2512 struct i40iw_puda_buf *rbuf)
2513{
2514 struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2515 int ret;
2516 u32 inc_sequence;
2517 int optionsize;
2518 struct i40iw_cm_info nfo;
2519
2520 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
2521 inc_sequence = ntohl(tcph->seq);
2522
2523 switch (cm_node->state) {
2524 case I40IW_CM_STATE_SYN_SENT:
2525 case I40IW_CM_STATE_MPAREQ_SENT:
2526 /* Rcvd syn on active open connection */
2527 i40iw_active_open_err(cm_node, 1);
2528 break;
2529 case I40IW_CM_STATE_LISTENING:
2530 /* Passive OPEN */
2531 if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
2532 cm_node->listener->backlog) {
2533 cm_node->cm_core->stats_backlog_drops++;
2534 i40iw_passive_open_err(cm_node, false);
2535 break;
2536 }
2537 ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
2538 if (ret) {
2539 i40iw_passive_open_err(cm_node, false);
2540 /* drop pkt */
2541 break;
2542 }
2543 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
2544 cm_node->accept_pend = 1;
2545 atomic_inc(&cm_node->listener->pend_accepts_cnt);
2546
2547 cm_node->state = I40IW_CM_STATE_SYN_RCVD;
2548 i40iw_get_addr_info(cm_node, &nfo);
2549 ret = i40iw_manage_qhash(cm_node->iwdev,
2550 &nfo,
2551 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
2552 I40IW_QHASH_MANAGE_TYPE_ADD,
2553 (void *)cm_node,
2554 false);
2555 cm_node->qhash_set = true;
2556 break;
2557 case I40IW_CM_STATE_CLOSED:
2558 i40iw_cleanup_retrans_entry(cm_node);
2559 atomic_inc(&cm_node->ref_count);
2560 i40iw_send_reset(cm_node);
2561 break;
2562 case I40IW_CM_STATE_OFFLOADED:
2563 case I40IW_CM_STATE_ESTABLISHED:
2564 case I40IW_CM_STATE_FIN_WAIT1:
2565 case I40IW_CM_STATE_FIN_WAIT2:
2566 case I40IW_CM_STATE_MPAREQ_RCVD:
2567 case I40IW_CM_STATE_LAST_ACK:
2568 case I40IW_CM_STATE_CLOSING:
2569 case I40IW_CM_STATE_UNKNOWN:
2570 default:
2571 break;
2572 }
2573}
2574
2575/**
2576 * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
2577 * @cm_node: connection's node
2578 * @rbuf: receive buffer
2579 */
2580static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
2581 struct i40iw_puda_buf *rbuf)
2582{
2583 struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2584 int ret;
2585 u32 inc_sequence;
2586 int optionsize;
2587
2588 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
2589 inc_sequence = ntohl(tcph->seq);
2590 switch (cm_node->state) {
2591 case I40IW_CM_STATE_SYN_SENT:
2592 i40iw_cleanup_retrans_entry(cm_node);
2593 /* active open */
2594 if (i40iw_check_syn(cm_node, tcph)) {
2595 i40iw_pr_err("check syn fail\n");
2596 return;
2597 }
2598 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
2599 /* setup options */
2600 ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
2601 if (ret) {
2602 i40iw_debug(cm_node->dev,
2603 I40IW_DEBUG_CM,
2604 "cm_node=%p tcp_options failed\n",
2605 cm_node);
2606 break;
2607 }
2608 i40iw_cleanup_retrans_entry(cm_node);
2609 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
2610 i40iw_send_ack(cm_node); /* ACK for the syn_ack */
2611 ret = i40iw_send_mpa_request(cm_node);
2612 if (ret) {
2613 i40iw_debug(cm_node->dev,
2614 I40IW_DEBUG_CM,
2615 "cm_node=%p i40iw_send_mpa_request failed\n",
2616 cm_node);
2617 break;
2618 }
2619 cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
2620 break;
2621 case I40IW_CM_STATE_MPAREQ_RCVD:
2622 i40iw_passive_open_err(cm_node, true);
2623 break;
2624 case I40IW_CM_STATE_LISTENING:
2625 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
2626 i40iw_cleanup_retrans_entry(cm_node);
2627 cm_node->state = I40IW_CM_STATE_CLOSED;
2628 i40iw_send_reset(cm_node);
2629 break;
2630 case I40IW_CM_STATE_CLOSED:
2631 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
2632 i40iw_cleanup_retrans_entry(cm_node);
2633 atomic_inc(&cm_node->ref_count);
2634 i40iw_send_reset(cm_node);
2635 break;
2636 case I40IW_CM_STATE_ESTABLISHED:
2637 case I40IW_CM_STATE_FIN_WAIT1:
2638 case I40IW_CM_STATE_FIN_WAIT2:
2639 case I40IW_CM_STATE_LAST_ACK:
2640 case I40IW_CM_STATE_OFFLOADED:
2641 case I40IW_CM_STATE_CLOSING:
2642 case I40IW_CM_STATE_UNKNOWN:
2643 case I40IW_CM_STATE_MPAREQ_SENT:
2644 default:
2645 break;
2646 }
2647}
2648
2649/**
2650 * i40iw_handle_ack_pkt - process packet with ACK
2651 * @cm_node: connection's node
2652 * @rbuf: receive buffer
2653 */
2654static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
2655 struct i40iw_puda_buf *rbuf)
2656{
2657 struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2658 u32 inc_sequence;
2659 int ret = 0;
2660 int optionsize;
2661 u32 datasize = rbuf->datalen;
2662
2663 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
2664
2665 if (i40iw_check_seq(cm_node, tcph))
2666 return -EINVAL;
2667
2668 inc_sequence = ntohl(tcph->seq);
2669 switch (cm_node->state) {
2670 case I40IW_CM_STATE_SYN_RCVD:
2671 i40iw_cleanup_retrans_entry(cm_node);
2672 ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
2673 if (ret)
2674 break;
2675 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
2676 cm_node->state = I40IW_CM_STATE_ESTABLISHED;
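/* the peer may piggyback its MPA request on the ACK that completes the handshake */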
2677 if (datasize) {
2678 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2679 i40iw_handle_rcv_mpa(cm_node, rbuf);
2680 }
2681 break;
2682 case I40IW_CM_STATE_ESTABLISHED:
2683 i40iw_cleanup_retrans_entry(cm_node);
2684 if (datasize) {
2685 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2686 i40iw_handle_rcv_mpa(cm_node, rbuf);
2687 }
2688 break;
2689 case I40IW_CM_STATE_MPAREQ_SENT:
2690 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
2691 if (datasize) {
2692 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2693 i40iw_handle_rcv_mpa(cm_node, rbuf);
2694 }
2695 break;
2696 case I40IW_CM_STATE_LISTENING:
2697 i40iw_cleanup_retrans_entry(cm_node);
2698 cm_node->state = I40IW_CM_STATE_CLOSED;
2699 i40iw_send_reset(cm_node);
2700 break;
2701 case I40IW_CM_STATE_CLOSED:
2702 i40iw_cleanup_retrans_entry(cm_node);
2703 atomic_inc(&cm_node->ref_count);
2704 i40iw_send_reset(cm_node);
2705 break;
2706 case I40IW_CM_STATE_LAST_ACK:
2707 case I40IW_CM_STATE_CLOSING:
2708 i40iw_cleanup_retrans_entry(cm_node);
2709 cm_node->state = I40IW_CM_STATE_CLOSED;
2710 if (!cm_node->accept_pend)
2711 cm_node->cm_id->rem_ref(cm_node->cm_id);
2712 i40iw_rem_ref_cm_node(cm_node);
2713 break;
2714 case I40IW_CM_STATE_FIN_WAIT1:
2715 i40iw_cleanup_retrans_entry(cm_node);
2716 cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
2717 break;
2718 case I40IW_CM_STATE_SYN_SENT:
2719 case I40IW_CM_STATE_FIN_WAIT2:
2720 case I40IW_CM_STATE_OFFLOADED:
2721 case I40IW_CM_STATE_MPAREQ_RCVD:
2722 case I40IW_CM_STATE_UNKNOWN:
2723 default:
2724 i40iw_cleanup_retrans_entry(cm_node);
2725 break;
2726 }
2727 return ret;
2728}
2729
2730/**
2731 * i40iw_process_packet - process cm packet
2732 * @cm_node: connection's node
2733 * @rbuf: receive buffer
2734 */
2735static void i40iw_process_packet(struct i40iw_cm_node *cm_node,
2736 struct i40iw_puda_buf *rbuf)
2737{
2738 enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;
2739 struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2740 u32 fin_set = 0;
2741 int ret;
2742
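/*
 * Classify the segment by its TCP flags; FIN is tracked separately
 * so that a FIN+ACK segment is handled by both paths.
 */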
2743 if (tcph->rst) {
2744 pkt_type = I40IW_PKT_TYPE_RST;
2745 } else if (tcph->syn) {
2746 pkt_type = I40IW_PKT_TYPE_SYN;
2747 if (tcph->ack)
2748 pkt_type = I40IW_PKT_TYPE_SYNACK;
2749 } else if (tcph->ack) {
2750 pkt_type = I40IW_PKT_TYPE_ACK;
2751 }
2752 if (tcph->fin)
2753 fin_set = 1;
2754
2755 switch (pkt_type) {
2756 case I40IW_PKT_TYPE_SYN:
2757 i40iw_handle_syn_pkt(cm_node, rbuf);
2758 break;
2759 case I40IW_PKT_TYPE_SYNACK:
2760 i40iw_handle_synack_pkt(cm_node, rbuf);
2761 break;
2762 case I40IW_PKT_TYPE_ACK:
2763 ret = i40iw_handle_ack_pkt(cm_node, rbuf);
2764 if (fin_set && !ret)
2765 i40iw_handle_fin_pkt(cm_node);
2766 break;
2767 case I40IW_PKT_TYPE_RST:
2768 i40iw_handle_rst_pkt(cm_node, rbuf);
2769 break;
2770 default:
2771 if (fin_set &&
2772 (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
2773 i40iw_handle_fin_pkt(cm_node);
2774 break;
2775 }
2776}
2777
2778/**
2779 * i40iw_make_listen_node - create a listen node with params
2780 * @cm_core: cm's core
2781 * @iwdev: iwarp device structure
2782 * @cm_info: quad info for connection
2783 */
2784static struct i40iw_cm_listener *i40iw_make_listen_node(
2785 struct i40iw_cm_core *cm_core,
2786 struct i40iw_device *iwdev,
2787 struct i40iw_cm_info *cm_info)
2788{
2789 struct i40iw_cm_listener *listener;
2790 unsigned long flags;
2791
2792 /* cannot have multiple matching listeners */
2793 listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
2794 cm_info->loc_port,
2795 cm_info->vlan_id,
2796 I40IW_CM_LISTENER_EITHER_STATE);
2797 if (listener &&
2798 (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
2799 atomic_dec(&listener->ref_count);
2800 i40iw_debug(cm_core->dev,
2801 I40IW_DEBUG_CM,
2802 "Not creating listener since it already exists\n");
2803 return NULL;
2804 }
2805
2806 if (!listener) {
2807 /* create a CM listen node (1/2 node to compare incoming traffic to) */
2808 listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
2809 if (!listener)
2810 return NULL;
2811 cm_core->stats_listen_nodes_created++;
2812 memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
2813 listener->loc_port = cm_info->loc_port;
2814
2815 INIT_LIST_HEAD(&listener->child_listen_list);
2816
2817 atomic_set(&listener->ref_count, 1);
2818 } else {
2819 listener->reused_node = 1;
2820 }
2821
2822 listener->cm_id = cm_info->cm_id;
2823 listener->ipv4 = cm_info->ipv4;
2824 listener->vlan_id = cm_info->vlan_id;
2825 atomic_set(&listener->pend_accepts_cnt, 0);
2826 listener->cm_core = cm_core;
2827 listener->iwdev = iwdev;
2828
2829 listener->backlog = cm_info->backlog;
2830 listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;
2831
2832 if (!listener->reused_node) {
2833 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
2834 list_add(&listener->list, &cm_core->listen_nodes);
2835 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
2836 }
2837
2838 return listener;
2839}
2840
2841/**
2842 * i40iw_create_cm_node - make a connection node with params
2843 * @cm_core: cm's core
2844 * @iwdev: iwarp device structure
2845 * @private_data_len: length of private data for the MPA request
2846 * @private_data: pointer to private data for connection
2847 * @cm_info: quad info for connection
2848 */
2849static struct i40iw_cm_node *i40iw_create_cm_node(
2850 struct i40iw_cm_core *cm_core,
2851 struct i40iw_device *iwdev,
2852 u16 private_data_len,
2853 void *private_data,
2854 struct i40iw_cm_info *cm_info)
2855{
2856 struct i40iw_cm_node *cm_node;
2857 struct i40iw_cm_listener *loopback_remotelistener;
2858 struct i40iw_cm_node *loopback_remotenode;
2859 struct i40iw_cm_info loopback_cm_info;
2860
2861 /* create a CM connection node */
2862 cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
2863 if (!cm_node)
2864 return NULL;
2865 /* set our node side to client (active) side */
2866 cm_node->tcp_cntxt.client = 1;
2867 cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
2868
2869 if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
2870 loopback_remotelistener = i40iw_find_listener(
2871 cm_core,
2872 cm_info->rem_addr,
2873 cm_node->rem_port,
Faisal Latiff27b4742016-01-20 13:40:04 -06002874 cm_node->vlan_id,
2875 I40IW_CM_LISTENER_ACTIVE_STATE);
2876 if (!loopback_remotelistener) {
2877 i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
2878 } else {
2879 loopback_cm_info = *cm_info;
2880 loopback_cm_info.loc_port = cm_info->rem_port;
2881 loopback_cm_info.rem_port = cm_info->loc_port;
2882 loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
2883 loopback_cm_info.ipv4 = cm_info->ipv4;
2884 loopback_remotenode = i40iw_make_cm_node(cm_core,
2885 iwdev,
2886 &loopback_cm_info,
2887 loopback_remotelistener);
2888 if (!loopback_remotenode) {
2889 i40iw_rem_ref_cm_node(cm_node);
2890 return NULL;
2891 }
2892 cm_core->stats_loopbacks++;
2893 loopback_remotenode->loopbackpartner = cm_node;
2894 loopback_remotenode->tcp_cntxt.rcv_wscale =
2895 I40IW_CM_DEFAULT_RCV_WND_SCALE;
2896 cm_node->loopbackpartner = loopback_remotenode;
2897 memcpy(loopback_remotenode->pdata_buf, private_data,
2898 private_data_len);
2899 loopback_remotenode->pdata.size = private_data_len;
2900
2901 cm_node->state = I40IW_CM_STATE_OFFLOADED;
2902 cm_node->tcp_cntxt.rcv_nxt =
2903 loopback_remotenode->tcp_cntxt.loc_seq_num;
2904 loopback_remotenode->tcp_cntxt.rcv_nxt =
2905 cm_node->tcp_cntxt.loc_seq_num;
2906 cm_node->tcp_cntxt.max_snd_wnd =
2907 loopback_remotenode->tcp_cntxt.rcv_wnd;
2908 loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
2909 cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
2910 loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
2911 cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
2912 loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
2913 loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
2914 i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
2915 }
2916 return cm_node;
2917 }
2918
2919 cm_node->pdata.size = private_data_len;
2920 cm_node->pdata.addr = cm_node->pdata_buf;
2921
2922 memcpy(cm_node->pdata_buf, private_data, private_data_len);
2923
2924 cm_node->state = I40IW_CM_STATE_SYN_SENT;
2925 return cm_node;
2926}
2927
2928/**
2929 * i40iw_cm_reject - reject and tear down a connection
2930 * @cm_node: connection's node
2931 * @pdata: pointer to private data for reject
2932 * @plen: size of private data
2933 */
2934static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
2935{
2936 int ret = 0;
2937 int err;
2938 int passive_state;
2939 struct iw_cm_id *cm_id = cm_node->cm_id;
2940 struct i40iw_cm_node *loopback = cm_node->loopbackpartner;
2941
2942 if (cm_node->tcp_cntxt.client)
2943 return ret;
2944 i40iw_cleanup_retrans_entry(cm_node);
2945
2946 if (!loopback) {
2947 passive_state = atomic_add_return(1, &cm_node->passive_state);
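/*
 * If a RST has already been indicated for this passive node just
 * destroy it, otherwise send an MPA reject to the peer.
 */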
2948 if (passive_state == I40IW_SEND_RESET_EVENT) {
2949 cm_node->state = I40IW_CM_STATE_CLOSED;
2950 i40iw_rem_ref_cm_node(cm_node);
2951 } else {
2952 if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
2953 i40iw_rem_ref_cm_node(cm_node);
2954 } else {
2955 ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
2956 if (ret) {
2957 cm_node->state = I40IW_CM_STATE_CLOSED;
2958 err = i40iw_send_reset(cm_node);
2959 if (err)
2960 i40iw_pr_err("send reset failed\n");
2961 } else {
2962 cm_id->add_ref(cm_id);
2963 }
2964 }
2965 }
2966 } else {
2967 cm_node->cm_id = NULL;
2968 if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
2969 i40iw_rem_ref_cm_node(cm_node);
2970 i40iw_rem_ref_cm_node(loopback);
2971 } else {
2972 ret = i40iw_send_cm_event(loopback,
2973 loopback->cm_id,
2974 IW_CM_EVENT_CONNECT_REPLY,
2975 -ECONNREFUSED);
2976 i40iw_rem_ref_cm_node(cm_node);
2977 loopback->state = I40IW_CM_STATE_CLOSING;
2978
2979 cm_id = loopback->cm_id;
2980 i40iw_rem_ref_cm_node(loopback);
2981 cm_id->rem_ref(cm_id);
2982 }
2983 }
2984
2985 return ret;
2986}
2987
2988/**
2989 * i40iw_cm_close - close of cm connection
2990 * @cm_node: connection's node
2991 */
2992static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
2993{
2994 int ret = 0;
2995
2996 if (!cm_node)
2997 return -EINVAL;
2998
2999 switch (cm_node->state) {
3000 case I40IW_CM_STATE_SYN_RCVD:
3001 case I40IW_CM_STATE_SYN_SENT:
3002 case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
3003 case I40IW_CM_STATE_ESTABLISHED:
3004 case I40IW_CM_STATE_ACCEPTING:
3005 case I40IW_CM_STATE_MPAREQ_SENT:
3006 case I40IW_CM_STATE_MPAREQ_RCVD:
3007 i40iw_cleanup_retrans_entry(cm_node);
3008 i40iw_send_reset(cm_node);
3009 break;
3010 case I40IW_CM_STATE_CLOSE_WAIT:
3011 cm_node->state = I40IW_CM_STATE_LAST_ACK;
3012 i40iw_send_fin(cm_node);
3013 break;
3014 case I40IW_CM_STATE_FIN_WAIT1:
3015 case I40IW_CM_STATE_FIN_WAIT2:
3016 case I40IW_CM_STATE_LAST_ACK:
3017 case I40IW_CM_STATE_TIME_WAIT:
3018 case I40IW_CM_STATE_CLOSING:
3019 ret = -1;
3020 break;
3021 case I40IW_CM_STATE_LISTENING:
3022 i40iw_cleanup_retrans_entry(cm_node);
3023 i40iw_send_reset(cm_node);
3024 break;
3025 case I40IW_CM_STATE_MPAREJ_RCVD:
3026 case I40IW_CM_STATE_UNKNOWN:
3027 case I40IW_CM_STATE_INITED:
3028 case I40IW_CM_STATE_CLOSED:
3029 case I40IW_CM_STATE_LISTENER_DESTROYED:
3030 i40iw_rem_ref_cm_node(cm_node);
3031 break;
3032 case I40IW_CM_STATE_OFFLOADED:
3033 if (cm_node->send_entry)
3034 i40iw_pr_err("send_entry\n");
3035 i40iw_rem_ref_cm_node(cm_node);
3036 break;
3037 }
3038 return ret;
3039}
3040
3041/**
3042 * i40iw_receive_ilq - receive an Ethernet packet and process it
3043 * through CM
3044 * @dev: FPK dev struct
3045 * @rbuf: receive buffer
3046 */
3047void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
3048{
3049 struct i40iw_cm_node *cm_node;
3050 struct i40iw_cm_listener *listener;
3051 struct iphdr *iph;
3052 struct ipv6hdr *ip6h;
3053 struct tcphdr *tcph;
3054 struct i40iw_cm_info cm_info;
3055 struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
3056 struct i40iw_cm_core *cm_core = &iwdev->cm_core;
3057 struct vlan_ethhdr *ethh;
3058
3059 /* if vlan, then maclen = 18 else 14 */
3060 iph = (struct iphdr *)rbuf->iph;
3061 memset(&cm_info, 0, sizeof(cm_info));
3062
3063 i40iw_debug_buf(dev,
3064 I40IW_DEBUG_ILQ,
3065 "RECEIVE ILQ BUFFER",
3066 rbuf->mem.va,
3067 rbuf->totallen);
3068 ethh = (struct vlan_ethhdr *)rbuf->mem.va;
3069
3070 if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
3071 cm_info.vlan_id = ntohs(ethh->h_vlan_TCI) & VLAN_VID_MASK;
3072 i40iw_debug(cm_core->dev,
3073 I40IW_DEBUG_CM,
3074 "%s vlan_id=%d\n",
3075 __func__,
3076 cm_info.vlan_id);
3077 } else {
3078 cm_info.vlan_id = I40IW_NO_VLAN;
3079 }
3080 tcph = (struct tcphdr *)rbuf->tcph;
3081
3082 if (rbuf->ipv4) {
3083 cm_info.loc_addr[0] = ntohl(iph->daddr);
3084 cm_info.rem_addr[0] = ntohl(iph->saddr);
3085 cm_info.ipv4 = true;
3086 } else {
3087 ip6h = (struct ipv6hdr *)rbuf->iph;
3088 i40iw_copy_ip_ntohl(cm_info.loc_addr,
3089 ip6h->daddr.in6_u.u6_addr32);
3090 i40iw_copy_ip_ntohl(cm_info.rem_addr,
3091 ip6h->saddr.in6_u.u6_addr32);
3092 cm_info.ipv4 = false;
3093 }
3094 cm_info.loc_port = ntohs(tcph->dest);
3095 cm_info.rem_port = ntohs(tcph->source);
3096 cm_node = i40iw_find_node(cm_core,
3097 cm_info.rem_port,
3098 cm_info.rem_addr,
3099 cm_info.loc_port,
3100 cm_info.loc_addr,
3101 true);
3102
3103 if (!cm_node) {
3104 /* Only SYN packets (passive open) are */
3105 /* accepted when no cm_node exists yet */
3106 if (!tcph->syn || tcph->ack)
3107 return;
3108 listener =
3109 i40iw_find_listener(cm_core,
3110 cm_info.loc_addr,
3111 cm_info.loc_port,
3112 cm_info.vlan_id,
3113 I40IW_CM_LISTENER_ACTIVE_STATE);
3114 if (!listener) {
3115 cm_info.cm_id = NULL;
3116 i40iw_debug(cm_core->dev,
3117 I40IW_DEBUG_CM,
3118 "%s no listener found\n",
3119 __func__);
3120 return;
3121 }
3122 cm_info.cm_id = listener->cm_id;
3123 cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
3124 if (!cm_node) {
3125 i40iw_debug(cm_core->dev,
3126 I40IW_DEBUG_CM,
3127 "%s allocate node failed\n",
3128 __func__);
3129 atomic_dec(&listener->ref_count);
3130 return;
3131 }
3132 if (!tcph->rst && !tcph->fin) {
3133 cm_node->state = I40IW_CM_STATE_LISTENING;
3134 } else {
3135 i40iw_rem_ref_cm_node(cm_node);
3136 return;
3137 }
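/* hold an extra reference so the release after processing does not free the new node */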
3138 atomic_inc(&cm_node->ref_count);
3139 } else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
3140 i40iw_rem_ref_cm_node(cm_node);
3141 return;
3142 }
3143 i40iw_process_packet(cm_node, rbuf);
3144 i40iw_rem_ref_cm_node(cm_node);
3145}
3146
3147/**
3148 * i40iw_setup_cm_core - allocate a top level instance of a cm
3149 * core
3150 * @iwdev: iwarp device structure
3151 */
3152void i40iw_setup_cm_core(struct i40iw_device *iwdev)
3153{
3154 struct i40iw_cm_core *cm_core = &iwdev->cm_core;
3155
3156 cm_core->iwdev = iwdev;
3157 cm_core->dev = &iwdev->sc_dev;
3158
3159 INIT_LIST_HEAD(&cm_core->connected_nodes);
3160 INIT_LIST_HEAD(&cm_core->listen_nodes);
3161
3162 init_timer(&cm_core->tcp_timer);
3163 cm_core->tcp_timer.function = i40iw_cm_timer_tick;
3164 cm_core->tcp_timer.data = (unsigned long)cm_core;
3165
3166 spin_lock_init(&cm_core->ht_lock);
3167 spin_lock_init(&cm_core->listen_list_lock);
3168
3169 cm_core->event_wq = create_singlethread_workqueue("iwewq");
3170 cm_core->disconn_wq = create_singlethread_workqueue("iwdwq");
3171}
3172
3173/**
3174 * i40iw_cleanup_cm_core - deallocate a top level instance of a
3175 * cm core
3176 * @cm_core: cm's core
3177 */
3178void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
3179{
3180 unsigned long flags;
3181
3182 if (!cm_core)
3183 return;
3184
3185 spin_lock_irqsave(&cm_core->ht_lock, flags);
3186 if (timer_pending(&cm_core->tcp_timer))
3187 del_timer_sync(&cm_core->tcp_timer);
3188 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
3189
3190 destroy_workqueue(cm_core->event_wq);
3191 destroy_workqueue(cm_core->disconn_wq);
3192}
3193
3194/**
3195 * i40iw_init_tcp_ctx - setup qp context
3196 * @cm_node: connection's node
3197 * @tcp_info: offload info for tcp
3198 * @iwqp: associate qp for the connection
3199 */
3200static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
3201 struct i40iw_tcp_offload_info *tcp_info,
3202 struct i40iw_qp *iwqp)
3203{
3204 tcp_info->ipv4 = cm_node->ipv4;
3205 tcp_info->drop_ooo_seg = true;
3206 tcp_info->wscale = true;
3207 tcp_info->ignore_tcp_opt = true;
3208 tcp_info->ignore_tcp_uns_opt = true;
3209 tcp_info->no_nagle = false;
3210
3211 tcp_info->ttl = I40IW_DEFAULT_TTL;
3212 tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
3213 tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
3214 tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;
3215
3216 tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
3217 tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
3218 tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
3219
3220 tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3221 tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
3222 tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
3223 tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3224
3225 tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3226 tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
3227 tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
3228 tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3229 tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
3230 tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
3231 cm_node->tcp_cntxt.rcv_wscale);
3232
3233 tcp_info->flow_label = 0;
3234 tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
3235 if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
3236 tcp_info->insert_vlan_tag = true;
3237 tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
3238 }
3239 if (cm_node->ipv4) {
3240 tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
3241 tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
3242
3243 tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
3244 tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
3245 tcp_info->arp_idx =
3246 cpu_to_le16((u16)i40iw_arp_table(
3247 iwqp->iwdev,
3248 &tcp_info->dest_ip_addr3,
3249 true,
3250 NULL,
3251 I40IW_ARP_RESOLVE));
3252 } else {
3253 tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
3254 tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
3255 tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
3256 tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
3257 tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
3258 tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
3259 tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
3260 tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
3261 tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
3262 tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
3263 tcp_info->arp_idx =
3264 cpu_to_le16((u16)i40iw_arp_table(
3265 iwqp->iwdev,
3266 &tcp_info->dest_ip_addr0,
3267 false,
3268 NULL,
3269 I40IW_ARP_RESOLVE));
3270 }
3271}
3272
3273/**
3274 * i40iw_cm_init_tsa_conn - setup qp for RTS
3275 * @iwqp: associate qp for the connection
3276 * @cm_node: connection's node
3277 */
3278static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
3279 struct i40iw_cm_node *cm_node)
3280{
3281 struct i40iw_tcp_offload_info tcp_info;
3282 struct i40iwarp_offload_info *iwarp_info;
3283 struct i40iw_qp_host_ctx_info *ctx_info;
3284 struct i40iw_device *iwdev = iwqp->iwdev;
3285 struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
3286
3287 memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
3288 iwarp_info = &iwqp->iwarp_info;
3289 ctx_info = &iwqp->ctx_info;
3290
3291 ctx_info->tcp_info = &tcp_info;
3292 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
3293 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
3294
3295 iwarp_info->ord_size = cm_node->ord_size;
3296 iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);
3297
3298 if (iwarp_info->ord_size == 1)
3299 iwarp_info->ord_size = 2;
3300
3301 iwarp_info->rd_enable = true;
3302 iwarp_info->rdmap_ver = 1;
3303 iwarp_info->ddp_ver = 1;
3304
3305 iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
3306
3307 ctx_info->tcp_info_valid = true;
3308 ctx_info->iwarp_info_valid = true;
3309
3310 i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
3311 if (cm_node->snd_mark_en) {
3312 iwarp_info->snd_mark_en = true;
3313 iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
3314 SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
3315 }
3316
3317 cm_node->state = I40IW_CM_STATE_OFFLOADED;
3318 tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
3319 tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
3320
3321 dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
3322
3323 /* once tcp_info is set, no need to do it again */
3324 ctx_info->tcp_info_valid = false;
3325 ctx_info->iwarp_info_valid = false;
3326}
3327
3328/**
3329 * i40iw_cm_disconn - when a connection is being closed
3330 * @iwqp: associate qp for the connection
3331 */
3332int i40iw_cm_disconn(struct i40iw_qp *iwqp)
3333{
3334 struct disconn_work *work;
3335 struct i40iw_device *iwdev = iwqp->iwdev;
3336 struct i40iw_cm_core *cm_core = &iwdev->cm_core;
3337
3338 work = kzalloc(sizeof(*work), GFP_ATOMIC);
3339 if (!work)
3340 return -ENOMEM; /* Timer will clean up */
3341
3342 i40iw_add_ref(&iwqp->ibqp);
3343 work->iwqp = iwqp;
3344 INIT_WORK(&work->work, i40iw_disconnect_worker);
3345 queue_work(cm_core->disconn_wq, &work->work);
3346 return 0;
3347}
3348
3349/**
3350 * i40iw_loopback_nop - Send a nop
3351 * @qp: associated hw qp
3352 */
3353static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
3354{
3355 u64 *wqe;
3356 u64 header;
3357
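/*
 * Build a single NOP WQE at the base of the SQ; loopback
 * connections do not need an LSMM on the wire.
 */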
3358 wqe = qp->qp_uk.sq_base->elem;
3359 set_64bit_val(wqe, 0, 0);
3360 set_64bit_val(wqe, 8, 0);
3361 set_64bit_val(wqe, 16, 0);
3362
3363 header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
3364 LS_64(0, I40IWQPSQ_SIGCOMPL) |
3365 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3366 set_64bit_val(wqe, 24, header);
3367}
3368
3369/**
3370 * i40iw_qp_disconnect - free qp and close cm
3371 * @iwqp: associate qp for the connection
3372 */
3373static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
3374{
3375 struct i40iw_device *iwdev;
3376 struct i40iw_ib_device *iwibdev;
3377
3378 iwdev = to_iwdev(iwqp->ibqp.device);
3379 if (!iwdev) {
3380 i40iw_pr_err("iwdev == NULL\n");
3381 return;
3382 }
3383
3384 iwibdev = iwdev->iwibdev;
3385
3386 if (iwqp->active_conn) {
3387 /* indicate this connection is NOT active */
3388 iwqp->active_conn = 0;
3389 } else {
3390 /* Need to free the Last Streaming Mode Message */
3391 if (iwqp->ietf_mem.va) {
3392 if (iwqp->lsmm_mr)
3393 iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
3394 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
3395 }
3396 }
3397
3398 /* close the CM node down if it is still active */
3399 if (iwqp->cm_node) {
3400 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
3401 i40iw_cm_close(iwqp->cm_node);
3402 }
3403}
3404
3405/**
3406 * i40iw_cm_disconn_true - called by worker thread to disconnect qp
3407 * @iwqp: associate qp for the connection
3408 */
3409static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
3410{
3411 struct iw_cm_id *cm_id;
3412 struct i40iw_device *iwdev;
3413 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
3414 u16 last_ae;
3415 u8 original_hw_tcp_state;
3416 u8 original_ibqp_state;
3417 int disconn_status = 0;
3418 int issue_disconn = 0;
3419 int issue_close = 0;
3420 int issue_flush = 0;
3421 struct ib_event ibevent;
3422 unsigned long flags;
3423 int ret;
3424
3425 if (!iwqp) {
3426 i40iw_pr_err("iwqp == NULL\n");
3427 return;
3428 }
3429
3430 spin_lock_irqsave(&iwqp->lock, flags);
3431 cm_id = iwqp->cm_id;
3432 /* make sure we haven't already closed this connection */
3433 if (!cm_id) {
3434 spin_unlock_irqrestore(&iwqp->lock, flags);
3435 return;
3436 }
3437
3438 iwdev = to_iwdev(iwqp->ibqp.device);
3439
3440 original_hw_tcp_state = iwqp->hw_tcp_state;
3441 original_ibqp_state = iwqp->ibqp_state;
3442 last_ae = iwqp->last_aeq;
3443
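/*
 * From the terminate flags, the original TCP/QP state and the last
 * asynchronous event, decide whether to issue a disconnect event,
 * a close event and/or a flush of the QP.
 */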
3444 if (qp->term_flags) {
3445 issue_disconn = 1;
3446 issue_close = 1;
3447 iwqp->cm_id = NULL;
3448 /* When the terminate timer expires after the cm timer, we don't want the
3449 * terminate handler to issue cm_disconn, which can re-free
3450 * a QP even after its refcnt = 0.
3451 */
3452 del_timer(&iwqp->terminate_timer);
3453 if (!iwqp->flush_issued) {
3454 iwqp->flush_issued = 1;
3455 issue_flush = 1;
3456 }
3457 } else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
3458 ((original_ibqp_state == IB_QPS_RTS) &&
3459 (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
3460 issue_disconn = 1;
3461 if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
3462 disconn_status = -ECONNRESET;
3463 }
3464
3465 if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
3466 (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
3467 (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
3468 (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
3469 issue_close = 1;
3470 iwqp->cm_id = NULL;
3471 if (!iwqp->flush_issued) {
3472 iwqp->flush_issued = 1;
3473 issue_flush = 1;
3474 }
3475 }
3476
3477 spin_unlock_irqrestore(&iwqp->lock, flags);
3478 if (issue_flush && !iwqp->destroyed) {
3479 /* Flush the queues */
3480 i40iw_flush_wqes(iwdev, iwqp);
3481
3482 if (qp->term_flags) {
3483 ibevent.device = iwqp->ibqp.device;
3484 ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
3485 IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
3486 ibevent.element.qp = &iwqp->ibqp;
3487 iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
3488 }
3489 }
3490
3491 if (cm_id && cm_id->event_handler) {
3492 if (issue_disconn) {
3493 ret = i40iw_send_cm_event(NULL,
3494 cm_id,
3495 IW_CM_EVENT_DISCONNECT,
3496 disconn_status);
3497
3498 if (ret)
3499 i40iw_debug(&iwdev->sc_dev,
3500 I40IW_DEBUG_CM,
3501 "disconnect event failed %s: - cm_id = %p\n",
3502 __func__, cm_id);
3503 }
3504 if (issue_close) {
3505 i40iw_qp_disconnect(iwqp);
3506 cm_id->provider_data = iwqp;
3507 ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
3508 if (ret)
3509 i40iw_debug(&iwdev->sc_dev,
3510 I40IW_DEBUG_CM,
3511 "close event failed %s: - cm_id = %p\n",
3512 __func__, cm_id);
3513 cm_id->rem_ref(cm_id);
3514 }
3515 }
3516}
3517
3518/**
3519 * i40iw_disconnect_worker - worker for connection close
3520 * @work: pointer to the disconn_work structure
3521 */
3522static void i40iw_disconnect_worker(struct work_struct *work)
3523{
3524 struct disconn_work *dwork = container_of(work, struct disconn_work, work);
3525 struct i40iw_qp *iwqp = dwork->iwqp;
3526
3527 kfree(dwork);
3528 i40iw_cm_disconn_true(iwqp);
3529 i40iw_rem_ref(&iwqp->ibqp);
3530}
3531
3532/**
3533 * i40iw_accept - registered call for connection to be accepted
3534 * @cm_id: cm information for passive connection
3535 * @conn_param: accept parameters
3536 */
3537int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3538{
3539 struct ib_qp *ibqp;
3540 struct i40iw_qp *iwqp;
3541 struct i40iw_device *iwdev;
3542 struct i40iw_sc_dev *dev;
3543 struct i40iw_cm_node *cm_node;
3544 struct ib_qp_attr attr;
3545 int passive_state;
3546 struct ib_mr *ibmr;
3547 struct i40iw_pd *iwpd;
3548 u16 buf_len = 0;
3549 struct i40iw_kmem_info accept;
3550 enum i40iw_status_code status;
3551 u64 tagged_offset;
3552
3553 memset(&attr, 0, sizeof(attr));
3554 ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
3555 if (!ibqp)
3556 return -EINVAL;
3557
3558 iwqp = to_iwqp(ibqp);
3559 iwdev = iwqp->iwdev;
3560 dev = &iwdev->sc_dev;
3561 cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
3562
3563 if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
3564 cm_node->ipv4 = true;
3565 cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
3566 } else {
3567 cm_node->ipv4 = false;
3568 i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL);
3569 }
3570 i40iw_debug(cm_node->dev,
3571 I40IW_DEBUG_CM,
3572 "Accept vlan_id=%d\n",
3573 cm_node->vlan_id);
3574 if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
3575 if (cm_node->loopbackpartner)
3576 i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
3577 i40iw_rem_ref_cm_node(cm_node);
3578 return -EINVAL;
3579 }
3580
3581 passive_state = atomic_add_return(1, &cm_node->passive_state);
3582 if (passive_state == I40IW_SEND_RESET_EVENT) {
3583 i40iw_rem_ref_cm_node(cm_node);
3584 return -ECONNRESET;
3585 }
3586
3587 cm_node->cm_core->stats_accepts++;
3588 iwqp->cm_node = (void *)cm_node;
3589 cm_node->iwqp = iwqp;
3590
3591 buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN;
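/*
 * The DMA buffer holds the MPA reply frame followed by the caller's
 * private data (plus zero padding for peers that require it).
 */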
3592
3593 status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
3594
3595 if (status)
3596 return -ENOMEM;
3597 cm_node->pdata.size = conn_param->private_data_len;
3598 accept.addr = iwqp->ietf_mem.va;
3599 accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
3600 memcpy(accept.addr + accept.size, conn_param->private_data,
3601 conn_param->private_data_len);
3602
3603 /* setup our first outgoing iWarp send WQE (the IETF frame response) */
3604 if ((cm_node->ipv4 &&
3605 !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
3606 (!cm_node->ipv4 &&
3607 !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
3608		iwpd = iwqp->iwpd;
3609 tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
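		/* Register the IETF buffer as a local MR so the LSMM WQE below can reference it by lkey */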
3610 ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
3611 iwqp->ietf_mem.pa,
3612 buf_len,
3613 IB_ACCESS_LOCAL_WRITE,
3614 &tagged_offset);
3615 if (IS_ERR(ibmr)) {
3616 i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
3617 return -ENOMEM;
3618 }
3619
3620 ibmr->pd = &iwpd->ibpd;
3621 ibmr->device = iwpd->ibpd.device;
3622 iwqp->lsmm_mr = ibmr;
3623 if (iwqp->page)
3624 iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
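		/* Peers detected as ne020 or Chelsio get the LSMM sized without the MPA zero pad; all others include MPA_ZERO_PAD_LEN */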
3625 if (is_remote_ne020_or_chelsio(cm_node))
3626 dev->iw_priv_qp_ops->qp_send_lsmm(
3627 &iwqp->sc_qp,
3628 iwqp->ietf_mem.va,
3629 (accept.size + conn_param->private_data_len),
3630 ibmr->lkey);
3631 else
3632 dev->iw_priv_qp_ops->qp_send_lsmm(
3633 &iwqp->sc_qp,
3634 iwqp->ietf_mem.va,
3635 (accept.size + conn_param->private_data_len + MPA_ZERO_PAD_LEN),
3636 ibmr->lkey);
3637
3638 } else {
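		/* Loopback connections need no LSMM; post a nop WQE instead */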
3639 if (iwqp->page)
3640 iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
3641 i40iw_loopback_nop(&iwqp->sc_qp);
3642 }
3643
3644 if (iwqp->page)
3645 kunmap(iwqp->page);
3646
3647 iwqp->cm_id = cm_id;
3648 cm_node->cm_id = cm_id;
3649
3650 cm_id->provider_data = (void *)iwqp;
3651 iwqp->active_conn = 0;
3652
3653 cm_node->lsmm_size = accept.size + conn_param->private_data_len;
3654 i40iw_cm_init_tsa_conn(iwqp, cm_node);
3655 cm_id->add_ref(cm_id);
3656 i40iw_add_ref(&iwqp->ibqp);
3657
3658 i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
3659
3660 attr.qp_state = IB_QPS_RTS;
3661 cm_node->qhash_set = false;
3662 i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
3663 if (cm_node->loopbackpartner) {
3664 cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
3665
3666 /* copy entire MPA frame to our cm_node's frame */
3667 memcpy(cm_node->loopbackpartner->pdata_buf,
3668 conn_param->private_data,
3669 conn_param->private_data_len);
3670 i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
3671 }
3672
3673 cm_node->accelerated = 1;
3674 if (cm_node->accept_pend) {
3675 if (!cm_node->listener)
3676 i40iw_pr_err("cm_node->listener NULL for passive node\n");
3677 atomic_dec(&cm_node->listener->pend_accepts_cnt);
3678 cm_node->accept_pend = 0;
3679 }
3680 return 0;
3681}
3682
3683/**
3684 * i40iw_reject - registered call for connection to be rejected
3685 * @cm_id: cm information for passive connection
3686 * @pdata: private data to be sent
3687 * @pdata_len: private data length
3688 */
3689int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
3690{
3691 struct i40iw_device *iwdev;
3692 struct i40iw_cm_node *cm_node;
3693 struct i40iw_cm_node *loopback;
3694
3695 cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
3696 loopback = cm_node->loopbackpartner;
3697 cm_node->cm_id = cm_id;
3698 cm_node->pdata.size = pdata_len;
3699
3700 iwdev = to_iwdev(cm_id->device);
3701 if (!iwdev)
3702 return -EINVAL;
3703 cm_node->cm_core->stats_rejects++;
3704
3705 if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
3706 return -EINVAL;
3707
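	/* For loopback, mirror the reject private data onto the partner node */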
3708 if (loopback) {
3709 memcpy(&loopback->pdata_buf, pdata, pdata_len);
3710 loopback->pdata.size = pdata_len;
3711 }
3712
3713 return i40iw_cm_reject(cm_node, pdata, pdata_len);
3714}
3715
3716/**
3717 * i40iw_connect - registered call for connection to be established
 3718 * @cm_id: cm information for active connection
3719 * @conn_param: Information about the connection
3720 */
3721int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3722{
3723 struct ib_qp *ibqp;
3724 struct i40iw_qp *iwqp;
3725 struct i40iw_device *iwdev;
3726 struct i40iw_cm_node *cm_node;
3727 struct i40iw_cm_info cm_info;
3728 struct sockaddr_in *laddr;
3729 struct sockaddr_in *raddr;
3730 struct sockaddr_in6 *laddr6;
3731 struct sockaddr_in6 *raddr6;
3732	bool qhash_set = false;
3733	int apbvt_set = 0;
3734 enum i40iw_status_code status;
3735
3736 ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
3737 if (!ibqp)
3738 return -EINVAL;
3739 iwqp = to_iwqp(ibqp);
3740 if (!iwqp)
3741 return -EINVAL;
3742 iwdev = to_iwdev(iwqp->ibqp.device);
3743 if (!iwdev)
3744 return -EINVAL;
3745
3746	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3747 raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
3748 laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3749 raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
3750
3751 if (!(laddr->sin_port) || !(raddr->sin_port))
3752 return -EINVAL;
3753
3754 iwqp->active_conn = 1;
3755 iwqp->cm_id = NULL;
3756 cm_id->provider_data = iwqp;
3757
3758 /* set up the connection params for the node */
3759 if (cm_id->remote_addr.ss_family == AF_INET) {
3760 cm_info.ipv4 = true;
3761 memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
3762 memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
3763 cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
3764 cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
3765 cm_info.loc_port = ntohs(laddr->sin_port);
3766 cm_info.rem_port = ntohs(raddr->sin_port);
3767 cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
3768 } else {
3769 cm_info.ipv4 = false;
3770 i40iw_copy_ip_ntohl(cm_info.loc_addr,
3771 laddr6->sin6_addr.in6_u.u6_addr32);
3772 i40iw_copy_ip_ntohl(cm_info.rem_addr,
3773 raddr6->sin6_addr.in6_u.u6_addr32);
3774 cm_info.loc_port = ntohs(laddr6->sin6_port);
3775 cm_info.rem_port = ntohs(raddr6->sin6_port);
3776 i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
3777 }
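	/* Set up HW steering: non-loopback destinations get a quad hash entry for the established tuple, and the local port is claimed in the APBVT */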
3778	cm_info.cm_id = cm_id;
3779	if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
3780 (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
3781 raddr6->sin6_addr.in6_u.u6_addr32,
3782 sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
3783 status = i40iw_manage_qhash(iwdev,
3784 &cm_info,
3785 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3786 I40IW_QHASH_MANAGE_TYPE_ADD,
3787 NULL,
3788 true);
3789		if (status)
3790			return -EINVAL;
3791		qhash_set = true;
3792	}
3793	status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
3794	if (status) {
3795		i40iw_manage_qhash(iwdev,
3796 &cm_info,
3797 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3798 I40IW_QHASH_MANAGE_TYPE_DELETE,
3799 NULL,
3800 false);
3801 return -EINVAL;
3802 }
3803
3804 apbvt_set = 1;
3805 cm_id->add_ref(cm_id);
3806 cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
3807 conn_param->private_data_len,
3808 (void *)conn_param->private_data,
3809 &cm_info);
3810	if (!cm_node)
3811 goto err;
3812
3813 i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
3814 if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
3815 !cm_node->ord_size)
3816 cm_node->ord_size = 1;
3817
3818 cm_node->apbvt_set = apbvt_set;
3819	cm_node->qhash_set = qhash_set;
3820	iwqp->cm_node = cm_node;
3821 cm_node->iwqp = iwqp;
3822 iwqp->cm_id = cm_id;
3823 i40iw_add_ref(&iwqp->ibqp);
3824
3825 if (cm_node->state == I40IW_CM_STATE_SYN_SENT) {
3826 if (i40iw_send_syn(cm_node, 0)) {
3827 i40iw_rem_ref_cm_node(cm_node);
3828 goto err;
3829 }
3830 }
3831
3832 i40iw_debug(cm_node->dev,
3833 I40IW_DEBUG_CM,
3834 "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
3835 cm_node->rem_port,
3836 cm_node,
3837 cm_node->cm_id);
3838	return 0;
3839
3840err:
3841 if (cm_node) {
3842 if (cm_node->ipv4)
3843 i40iw_debug(cm_node->dev,
3844 I40IW_DEBUG_CM,
3845 "Api - connect() FAILED: dest addr=%pI4",
3846 cm_node->rem_addr);
3847 else
3848 i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
3849 "Api - connect() FAILED: dest addr=%pI6",
3850 cm_node->rem_addr);
3851 }
3852 i40iw_manage_qhash(iwdev,
3853 &cm_info,
3854 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3855 I40IW_QHASH_MANAGE_TYPE_DELETE,
3856 NULL,
3857 false);
3858
3859 if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
3860 cm_info.loc_port))
3861 i40iw_manage_apbvt(iwdev,
3862 cm_info.loc_port,
3863 I40IW_MANAGE_APBVT_DEL);
3864 cm_id->rem_ref(cm_id);
3865 iwdev->cm_core.stats_connect_errs++;
3866 return -ENOMEM;
3867}
3868
3869/**
3870 * i40iw_create_listen - registered call creating listener
3871 * @cm_id: cm information for passive connection
 3872 * @backlog: max accept pending count
3873 */
3874int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3875{
3876 struct i40iw_device *iwdev;
3877 struct i40iw_cm_listener *cm_listen_node;
3878 struct i40iw_cm_info cm_info;
3879 enum i40iw_status_code ret;
3880 struct sockaddr_in *laddr;
3881 struct sockaddr_in6 *laddr6;
3882 bool wildcard = false;
3883
3884 iwdev = to_iwdev(cm_id->device);
3885 if (!iwdev)
3886 return -EINVAL;
3887
3888	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3889 laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3890	memset(&cm_info, 0, sizeof(cm_info));
3891 if (laddr->sin_family == AF_INET) {
3892 cm_info.ipv4 = true;
3893 cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
3894 cm_info.loc_port = ntohs(laddr->sin_port);
3895
3896 if (laddr->sin_addr.s_addr != INADDR_ANY)
3897 cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
3898 else
3899 wildcard = true;
3900
3901 } else {
3902 cm_info.ipv4 = false;
3903 i40iw_copy_ip_ntohl(cm_info.loc_addr,
3904 laddr6->sin6_addr.in6_u.u6_addr32);
3905 cm_info.loc_port = ntohs(laddr6->sin6_port);
3906 if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
3907 i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
3908 &cm_info.vlan_id,
3909 NULL);
3910 else
3911 wildcard = true;
3912 }
3913	cm_info.backlog = backlog;
3914 cm_info.cm_id = cm_id;
3915
3916 cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
3917 if (!cm_listen_node) {
3918 i40iw_pr_err("cm_listen_node == NULL\n");
3919 return -ENOMEM;
3920 }
3921
3922 cm_id->provider_data = cm_listen_node;
3923
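	/* Only a freshly created (non-reused) listener sets up HW resources: wildcard listens go through i40iw_add_mqh_4/6, others add a single SYN qhash entry; both then claim the port in the APBVT */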
3924 if (!cm_listen_node->reused_node) {
3925		if (wildcard) {
3926 if (cm_info.ipv4)
3927 ret = i40iw_add_mqh_4(iwdev,
3928 &cm_info,
3929 cm_listen_node);
3930 else
3931 ret = i40iw_add_mqh_6(iwdev,
3932 &cm_info,
3933 cm_listen_node);
3934 if (ret)
3935 goto error;
3936
3937 ret = i40iw_manage_apbvt(iwdev,
3938					 cm_info.loc_port,
3939					 I40IW_MANAGE_APBVT_ADD);
3940
3941 if (ret)
3942 goto error;
3943 } else {
3944 ret = i40iw_manage_qhash(iwdev,
3945 &cm_info,
3946 I40IW_QHASH_TYPE_TCP_SYN,
3947 I40IW_QHASH_MANAGE_TYPE_ADD,
3948 NULL,
3949 true);
3950 if (ret)
3951 goto error;
3952 cm_listen_node->qhash_set = true;
3953 ret = i40iw_manage_apbvt(iwdev,
3954					 cm_info.loc_port,
3955					 I40IW_MANAGE_APBVT_ADD);
3956 if (ret)
3957 goto error;
3958 }
3959 }
3960 cm_id->add_ref(cm_id);
3961 cm_listen_node->cm_core->stats_listen_created++;
3962 return 0;
3963 error:
3964 i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
3965 return -EINVAL;
3966}
3967
3968/**
3969 * i40iw_destroy_listen - registered call to destroy listener
3970 * @cm_id: cm information for passive connection
3971 */
3972int i40iw_destroy_listen(struct iw_cm_id *cm_id)
3973{
3974 struct i40iw_device *iwdev;
3975
3976 iwdev = to_iwdev(cm_id->device);
3977 if (cm_id->provider_data)
3978 i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);
3979 else
3980 i40iw_pr_err("cm_id->provider_data was NULL\n");
3981
3982 cm_id->rem_ref(cm_id);
3983
3984 return 0;
3985}
3986
3987/**
3988 * i40iw_cm_event_connected - handle connected active node
 3989 * @event: info for the connection's cm_node
3990 */
3991static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
3992{
3993 struct i40iw_qp *iwqp;
3994 struct i40iw_device *iwdev;
3995 struct i40iw_cm_node *cm_node;
3996 struct i40iw_sc_dev *dev;
3997 struct ib_qp_attr attr;
3998 struct iw_cm_id *cm_id;
3999 int status;
4000 bool read0;
4001
4002 cm_node = event->cm_node;
4003 cm_id = cm_node->cm_id;
4004 iwqp = (struct i40iw_qp *)cm_id->provider_data;
4005 iwdev = to_iwdev(iwqp->ibqp.device);
4006 dev = &iwdev->sc_dev;
4007
4008 if (iwqp->destroyed) {
4009 status = -ETIMEDOUT;
4010 goto error;
4011 }
4012 i40iw_cm_init_tsa_conn(iwqp, cm_node);
4013 read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
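	/* read0 indicates the rdma0 operation is a zero-length RDMA READ; kick the accelerated connection via qp_send_rtt */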
4014 if (iwqp->page)
4015 iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
4016 dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
4017 if (iwqp->page)
4018 kunmap(iwqp->page);
4019 status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
4020 if (status)
4021 i40iw_pr_err("send cm event\n");
4022
4023 memset(&attr, 0, sizeof(attr));
4024 attr.qp_state = IB_QPS_RTS;
4025 cm_node->qhash_set = false;
4026 i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
4027
4028 cm_node->accelerated = 1;
4029 if (cm_node->accept_pend) {
4030 if (!cm_node->listener)
4031 i40iw_pr_err("listener is null for passive node\n");
4032 atomic_dec(&cm_node->listener->pend_accepts_cnt);
4033 cm_node->accept_pend = 0;
4034 }
4035 return;
4036
4037error:
4038 iwqp->cm_id = NULL;
4039 cm_id->provider_data = NULL;
4040 i40iw_send_cm_event(event->cm_node,
4041 cm_id,
4042 IW_CM_EVENT_CONNECT_REPLY,
4043 status);
4044 cm_id->rem_ref(cm_id);
4045 i40iw_rem_ref_cm_node(event->cm_node);
4046}
4047
4048/**
4049 * i40iw_cm_event_reset - handle reset
 4050 * @event: info for the connection's cm_node
4051 */
4052static void i40iw_cm_event_reset(struct i40iw_cm_event *event)
4053{
4054 struct i40iw_cm_node *cm_node = event->cm_node;
4055 struct iw_cm_id *cm_id = cm_node->cm_id;
4056 struct i40iw_qp *iwqp;
4057
4058 if (!cm_id)
4059 return;
4060
4061 iwqp = cm_id->provider_data;
4062 if (!iwqp)
4063 return;
4064
4065 i40iw_debug(cm_node->dev,
4066 I40IW_DEBUG_CM,
4067 "reset event %p - cm_id = %p\n",
4068 event->cm_node, cm_id);
4069 iwqp->cm_id = NULL;
4070
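	/* Surface the peer reset upward as a disconnect (-ECONNRESET) followed by a close */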
4071 i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);
4072 i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
4073}
4074
4075/**
4076 * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer
 4077 * @work: pointer to cm event info
4078 */
4079static void i40iw_cm_event_handler(struct work_struct *work)
4080{
4081 struct i40iw_cm_event *event = container_of(work,
4082 struct i40iw_cm_event,
4083 event_work);
4084 struct i40iw_cm_node *cm_node;
4085
4086 if (!event || !event->cm_node || !event->cm_node->cm_core)
4087 return;
4088
4089 cm_node = event->cm_node;
4090
4091 switch (event->type) {
4092 case I40IW_CM_EVENT_MPA_REQ:
4093 i40iw_send_cm_event(cm_node,
4094 cm_node->cm_id,
4095 IW_CM_EVENT_CONNECT_REQUEST,
4096 0);
4097 break;
4098 case I40IW_CM_EVENT_RESET:
4099 i40iw_cm_event_reset(event);
4100 break;
4101 case I40IW_CM_EVENT_CONNECTED:
4102 if (!event->cm_node->cm_id ||
4103 (event->cm_node->state != I40IW_CM_STATE_OFFLOADED))
4104 break;
4105 i40iw_cm_event_connected(event);
4106 break;
4107 case I40IW_CM_EVENT_MPA_REJECT:
4108 if (!event->cm_node->cm_id ||
4109 (cm_node->state == I40IW_CM_STATE_OFFLOADED))
4110 break;
4111 i40iw_send_cm_event(cm_node,
4112 cm_node->cm_id,
4113 IW_CM_EVENT_CONNECT_REPLY,
4114 -ECONNREFUSED);
4115 break;
4116 case I40IW_CM_EVENT_ABORTED:
4117 if (!event->cm_node->cm_id ||
4118 (event->cm_node->state == I40IW_CM_STATE_OFFLOADED))
4119 break;
4120 i40iw_event_connect_error(event);
4121 break;
4122 default:
4123 i40iw_pr_err("event type = %d\n", event->type);
4124 break;
4125 }
4126
4127 event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
4128 i40iw_rem_ref_cm_node(event->cm_node);
4129 kfree(event);
4130}
4131
4132/**
4133 * i40iw_cm_post_event - queue event request for worker thread
4134 * @event: cm node's info for up event call
4135 */
4136static void i40iw_cm_post_event(struct i40iw_cm_event *event)
4137{
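	/* Hold the cm_node and cm_id across the deferred work; i40iw_cm_event_handler() releases both when it finishes */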
4138 atomic_inc(&event->cm_node->ref_count);
4139 event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
4140 INIT_WORK(&event->event_work, i40iw_cm_event_handler);
4141
4142 queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
4143}