/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

#define QED_IWARP_ORD_DEFAULT		32
#define QED_IWARP_IRD_DEFAULT		32
#define QED_IWARP_RCV_WND_SIZE_DEF	(256 * 1024)
#define QED_IWARP_RCV_WND_SIZE_MIN	(64 * 1024)
#define QED_IWARP_TS_EN			BIT(0)
#define QED_IWARP_PARAM_CRC_NEEDED	(1)
#define QED_IWARP_PARAM_P2P		(1)

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code);

/* Override devinfo with iWARP-specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
	dev->max_qp = min_t(u32,
			    IWARP_MAX_QPS,
			    p_hwfn->p_rdma_info->num_qps);

	dev->max_cq = dev->max_qp;

	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}

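/* Enable TCP searching in the parser block so that iWARP TCP traffic can be
 * identified by HW.
 */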
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
	p_hwfn->b_rdma_enabled_in_prs = true;
}

static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

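/* Allocate a CID from the rdma cid bitmap, shift it to the protocol's CID
 * range and allocate ILT memory for its context dynamically. On failure the
 * bitmap bit is released again.
 */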
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate iWARP CID\n");
		return rc;
	}
	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
	if (rc)
		qed_iwarp_cid_cleaned(p_hwfn, *cid);

	return rc;
}

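/* Create a QP in FW. The shared queue page holds the SQ/RQ PBLs at fixed
 * offsets; their addresses are reported back to the caller through
 * out_params before the CREATE_QP ramrod is posted.
 */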
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
			struct qed_rdma_qp *qp,
			struct qed_rdma_create_qp_out_params *out_params)
{
	struct iwarp_create_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 physical_queue;
	u32 cid;
	int rc;

	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      IWARP_SHARED_QUEUE_PAGE_SIZE,
					      &qp->shared_queue_phys_addr,
					      GFP_KERNEL);
	if (!qp->shared_queue)
		return -ENOMEM;

	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		goto err1;

	qp->icid = (u16)cid;

	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.cid = qp->icid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_CREATE_QP,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		goto err2;

	p_ramrod = &p_ent->ramrod.iwarp_create_qp;

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	p_ramrod->pd = qp->pd;
	p_ramrod->sq_num_pages = qp->sq_num_pages;
	p_ramrod->rq_num_pages = qp->rq_num_pages;

	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);

	p_ramrod->cq_cid_for_sq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
	p_ramrod->cq_cid_for_rq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err2;

	return rc;

err2:
	qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  IWARP_SHARED_QUEUE_PAGE_SIZE,
			  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}

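/* Post a MODIFY_QP ramrod. Only the CLOSING and ERROR transitions are driven
 * through FW; all other state changes are tracked in the driver alone.
 */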
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct iwarp_modify_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
		  0x1);
	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
	else
		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}

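/* The qed RDMA API exposes RoCE-style QP states; map them to and from the
 * iWARP QP states tracked internally.
 */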
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
	switch (state) {
	case QED_ROCE_QP_STATE_RESET:
	case QED_ROCE_QP_STATE_INIT:
	case QED_ROCE_QP_STATE_RTR:
		return QED_IWARP_QP_STATE_IDLE;
	case QED_ROCE_QP_STATE_RTS:
		return QED_IWARP_QP_STATE_RTS;
	case QED_ROCE_QP_STATE_SQD:
		return QED_IWARP_QP_STATE_CLOSING;
	case QED_ROCE_QP_STATE_ERR:
		return QED_IWARP_QP_STATE_ERROR;
	case QED_ROCE_QP_STATE_SQE:
		return QED_IWARP_QP_STATE_TERMINATE;
	default:
		return QED_IWARP_QP_STATE_ERROR;
	}
}

static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
	switch (state) {
	case QED_IWARP_QP_STATE_IDLE:
		return QED_ROCE_QP_STATE_INIT;
	case QED_IWARP_QP_STATE_RTS:
		return QED_ROCE_QP_STATE_RTS;
	case QED_IWARP_QP_STATE_TERMINATE:
		return QED_ROCE_QP_STATE_SQE;
	case QED_IWARP_QP_STATE_CLOSING:
		return QED_ROCE_QP_STATE_SQD;
	case QED_IWARP_QP_STATE_ERROR:
		return QED_ROCE_QP_STATE_ERR;
	default:
		return QED_ROCE_QP_STATE_ERR;
	}
}

static const char * const iwarp_state_names[] = {
	"IDLE",
	"RTS",
	"TERMINATE",
	"CLOSING",
	"ERROR",
};

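/* Driver-side QP state machine. Transitions out of IDLE and RTS that are not
 * marked 'internal' (i.e. requested by the upper layer rather than triggered
 * by an async RST/FIN) are also reflected to FW via qed_iwarp_modify_fw().
 */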
int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_qp *qp,
		    enum qed_iwarp_qp_state new_state, bool internal)
{
	enum qed_iwarp_qp_state prev_iw_state;
	bool modify_fw = false;
	int rc = 0;

	/* Modify QP can be called from the upper layer or as a result of an
	 * async RST/FIN, so the state transition needs to be protected.
	 */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	prev_iw_state = qp->iwarp_state;

	if (prev_iw_state == new_state) {
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
		return 0;
	}

	switch (prev_iw_state) {
	case QED_IWARP_QP_STATE_IDLE:
		switch (new_state) {
		case QED_IWARP_QP_STATE_RTS:
			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			if (!internal)
				modify_fw = true;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_RTS:
		switch (new_state) {
		case QED_IWARP_QP_STATE_CLOSING:
			if (!internal)
				modify_fw = true;

			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			if (!internal)
				modify_fw = true;
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_ERROR:
		switch (new_state) {
		case QED_IWARP_QP_STATE_IDLE:
			qp->iwarp_state = new_state;
			break;
		case QED_IWARP_QP_STATE_CLOSING:
			/* Could happen due to a race; do nothing. */
			break;
		default:
			rc = -EINVAL;
		}
		break;
	case QED_IWARP_QP_STATE_TERMINATE:
	case QED_IWARP_QP_STATE_CLOSING:
		qp->iwarp_state = new_state;
		break;
	default:
		break;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
		   qp->icid,
		   iwarp_state_names[prev_iw_state],
		   iwarp_state_names[qp->iwarp_state],
		   internal ? " (internal)" : "");

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

	if (modify_fw)
		rc = qed_iwarp_modify_fw(p_hwfn, qp);

	return rc;
}

int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}

int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	int rc = 0;

	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
		rc = qed_iwarp_modify_qp(p_hwfn, qp,
					 QED_IWARP_QP_STATE_ERROR, false);
		if (rc)
			return rc;
	}

	rc = qed_iwarp_fw_destroy(p_hwfn, qp);

	if (qp->shared_queue)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  IWARP_SHARED_QUEUE_PAGE_SIZE,
				  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}

#define QED_IWARP_MAX_CID_CLEAN_TIME	100
#define QED_IWARP_MAX_NO_PROGRESS_CNT	5

/* This function waits for all the bits of a bmap to be cleared. As long as
 * there is progress (i.e. the number of bits left to be cleared decreases),
 * it keeps waiting.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
	int prev_weight = 0;
	int wait_count = 0;
	int weight = 0;

	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	prev_weight = weight;

	while (weight) {
		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

		weight = bitmap_weight(bmap->bitmap, bmap->max_count);

		if (prev_weight == weight) {
			wait_count++;
		} else {
			prev_weight = weight;
			wait_count = 0;
		}

		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
			DP_NOTICE(p_hwfn,
				  "%s bitmap wait timed out (%d cids pending)\n",
				  bmap->name, weight);
			return -EBUSY;
		}
	}
	return 0;
}

static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
	/* Now wait for all cids to be completed */
	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
					      &p_hwfn->p_rdma_info->cid_map);
}

int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
{
	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	return 0;
}

void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
}

static void
qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
			struct qed_iwarp_cm_info *cm_info)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
		   cm_info->ip_version);

	if (cm_info->ip_version == QED_TCP_IPV4)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);
	else
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI6h:%x, local_ip %pI6h:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len = %x ord = %d, ird = %d\n",
		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
}

static int
qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_ll2_buff *buf, u8 handle)
{
	int rc;

	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
				    (u16)buf->buff_size, buf, 1);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
			  rc, handle);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
				  buf->data, buf->data_phys_addr);
		kfree(buf);
	}

	return rc;
}

static struct qed_iwarp_listener *
qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info)
{
	struct qed_iwarp_listener *listener = NULL;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	bool found = false;

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	list_for_each_entry(listener,
			    &p_hwfn->p_rdma_info->iwarp.listen_list,
			    list_entry) {
		if (listener->port == cm_info->local_port) {
			if (!memcmp(listener->ip_addr,
				    ip_zero, sizeof(ip_zero))) {
				found = true;
				break;
			}

			if (!memcmp(listener->ip_addr,
				    cm_info->local_ip,
				    sizeof(cm_info->local_ip)) &&
			    (listener->vlan == cm_info->vlan)) {
				found = true;
				break;
			}
		}
	}

	if (found) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
			   listener);
		return listener;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
	return NULL;
}

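/* Parse an LL2-received SYN packet: extract the MAC addresses, vlan, IP
 * addresses and TCP ports into cm_info, and report the TCP payload length
 * and the offset of the TCP header within the buffer.
 */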
static int
qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info,
		       void *buf,
		       u8 *remote_mac_addr,
		       u8 *local_mac_addr,
		       int *payload_len, int *tcp_start_offset)
{
	struct vlan_ethhdr *vethh;
	bool vlan_valid = false;
	struct ipv6hdr *ip6h;
	struct ethhdr *ethh;
	struct tcphdr *tcph;
	struct iphdr *iph;
	int eth_hlen;
	int ip_hlen;
	int eth_type;
	int i;

	ethh = buf;
	eth_type = ntohs(ethh->h_proto);
	if (eth_type == ETH_P_8021Q) {
		vlan_valid = true;
		vethh = (struct vlan_ethhdr *)ethh;
		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
	}

	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);

	memcpy(remote_mac_addr, ethh->h_source, ETH_ALEN);

	memcpy(local_mac_addr, ethh->h_dest, ETH_ALEN);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type = %d source mac: %pM\n",
		   eth_type, ethh->h_source);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen = %d destination mac: %pM\n",
		   eth_hlen, ethh->h_dest);

	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);

	if (eth_type == ETH_P_IP) {
		cm_info->local_ip[0] = ntohl(iph->daddr);
		cm_info->remote_ip[0] = ntohl(iph->saddr);
		cm_info->ip_version = QED_TCP_IPV4;

		ip_hlen = (iph->ihl) * sizeof(u32);
		*payload_len = ntohs(iph->tot_len) - ip_hlen;
	} else if (eth_type == ETH_P_IPV6) {
		ip6h = (struct ipv6hdr *)iph;
		for (i = 0; i < 4; i++) {
			cm_info->local_ip[i] =
			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
			cm_info->remote_ip[i] =
			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
		}
		cm_info->ip_version = QED_TCP_IPV6;

		ip_hlen = sizeof(*ip6h);
		*payload_len = ntohs(ip6h->payload_len);
	} else {
		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
		return -EINVAL;
	}

	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);

	if (!tcph->syn) {
		DP_NOTICE(p_hwfn,
			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
			  iph->ihl, tcph->source, tcph->dest);
		return -EINVAL;
	}

	cm_info->local_port = ntohs(tcph->dest);
	cm_info->remote_port = ntohs(tcph->source);

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	*tcp_start_offset = eth_hlen + ip_hlen;

	return 0;
}

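/* Rx completion handler for the SYN LL2 connection. A SYN that matches a
 * registered listener is accepted; a SYN that matches no listener is sent
 * back to the device on the loopback Tx queue. In all cases the Rx buffer
 * is reposted once handling (or the Tx completion) is done.
 */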
static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_iwarp_ll2_buff *buf = data->cookie;
	struct qed_iwarp_listener *listener;
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_iwarp_cm_info cm_info;
	struct qed_hwfn *p_hwfn = cxt;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
	int tcp_start_offset;
	u8 ll2_syn_handle;
	int payload_len;
	int rc;

	memset(&cm_info, 0, sizeof(cm_info));

	/* Take the handle before any error path - the err label below
	 * reposts the buffer to this connection.
	 */
	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

	if (GET_FIELD(data->parse_flags,
		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
		goto err;
	}

	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
				    data->u.placement_offset, remote_mac_addr,
				    local_mac_addr, &payload_len,
				    &tcp_start_offset);
	if (rc)
		goto err;

	/* Check if there is a listener for this 4-tuple+vlan */
	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
	if (!listener) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
			   data->parse_flags, data->length.packet_length);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = data->vlan;

		if (GET_FIELD(data->parse_flags,
			      PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
			SET_FIELD(tx_pkt.bd_flags,
				  CORE_TX_BD_DATA_VLAN_INSERTION, 1);

		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
		tx_pkt.first_frag = buf->data_phys_addr +
				    data->u.placement_offset;
		tx_pkt.first_frag_len = data->length.packet_length;
		tx_pkt.cookie = buf;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
					       &tx_pkt, true);

		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Can't post SYN back to chip rc=%d\n", rc);
			goto err;
		}
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
err:
	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
}

static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t rx_buf_addr,
				     bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
}

static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
				      void *cookie, dma_addr_t first_frag_addr,
				      bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	/* this was originally an rx packet, post it back */
	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
}

static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	if (!buffer)
		return;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);

	kfree(buffer);
}

static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	int rc = 0;

	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_syn_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	}

	qed_llh_remove_mac_filter(p_hwfn,
				  p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
	return rc;
}

static int
qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
			    int num_rx_bufs, int buff_size, u8 ll2_handle)
{
	struct qed_iwarp_ll2_buff *buffer;
	int rc = 0;
	int i;

	for (i = 0; i < num_rx_bufs; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			rc = -ENOMEM;
			break;
		}

		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  buff_size,
						  &buffer->data_phys_addr,
						  GFP_KERNEL);
		if (!buffer->data) {
			kfree(buffer);
			rc = -ENOMEM;
			break;
		}

		buffer->buff_size = buff_size;
		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
		if (rc)
			/* buffers will be deallocated by qed_ll2 */
			break;
	}
	return rc;
}

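/* Rx buffer size: MTU plus the Ethernet header, room for two VLAN tags and
 * 2 bytes of padding, plus one extra cache line, aligned to the cache-line
 * size.
 */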
#define QED_IWARP_MAX_BUF_SIZE(mtu)					     \
	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE,    \
	      ETH_CACHE_LINE_SIZE)

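/* Bring up the dedicated LL2 connection used to receive TCP SYN packets:
 * add a MAC filter, acquire and establish the connection, and post the
 * initial set of Rx buffers.
 */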
static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params,
		    struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info;
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	int rc = 0;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;

	iwarp_info->max_mtu = params->max_mtu;

	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);

	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
	if (rc)
		return rc;

	/* Start SYN connection */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
	cbs.cookie = p_hwfn;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
	data.input.tx_tc = PKT_LB_TC;
	data.input.tx_dest = QED_LL2_TX_DEST_LB;
	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
		qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
		goto err;
	}

	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 QED_IWARP_LL2_SYN_RX_SIZE,
					 QED_IWARP_MAX_SYN_PKT_SIZE,
					 iwarp_info->ll2_syn_handle);
	if (rc)
		goto err;

	return rc;
err:
	qed_iwarp_ll2_stop(p_hwfn, p_ptt);

	return rc;
}

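/* Top-level iWARP init: set the TCP/MPA defaults, register the async event
 * callback and start the SYN LL2 connection.
 */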
int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		    struct qed_rdma_start_in_params *params)
{
	struct qed_iwarp_info *iwarp_info;
	u32 rcv_wnd_size;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
	rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;

	/* The TCP window scale is relative to the minimum window size, i.e.
	 * scale 0 corresponds to QED_IWARP_RCV_WND_SIZE_MIN. With the
	 * defaults this gives ilog2(256K) - ilog2(64K) = 18 - 16 = 2.
	 */
	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;

	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;

	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
				  qed_iwarp_async_event);

	return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
}

int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
	if (rc)
		return rc;

	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);

	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code)
{
	return 0;
}

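/* Register a listener for a given IP/port/vlan tuple. No HW configuration
 * is involved; the listener is simply added to the list that
 * qed_iwarp_get_listener() matches incoming SYNs against.
 */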
int
qed_iwarp_create_listen(void *rdma_cxt,
			struct qed_iwarp_listen_in *iparams,
			struct qed_iwarp_listen_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_listener *listener;

	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->ip_version = iparams->ip_version;
	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
	listener->port = iparams->port;
	listener->vlan = iparams->vlan;

	listener->event_cb = iparams->event_cb;
	listener->cb_context = iparams->cb_context;
	listener->max_backlog = iparams->max_backlog;
	oparams->handle = listener;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&listener->list_entry,
		      &p_hwfn->p_rdma_info->iwarp.listen_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
		   listener->event_cb,
		   listener,
		   listener->ip_addr[0],
		   listener->ip_addr[1],
		   listener->ip_addr[2],
		   listener->ip_addr[3], listener->port, listener->vlan);

	return 0;
}

int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
{
	struct qed_iwarp_listener *listener = handle;
	struct qed_hwfn *p_hwfn = rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&listener->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	kfree(listener);

	return 0;
}

void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
		   struct qed_rdma_query_qp_out_params *out_params)
{
	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
}