/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

#define QED_IWARP_ORD_DEFAULT		32
#define QED_IWARP_IRD_DEFAULT		32
#define QED_IWARP_RCV_WND_SIZE_DEF	(256 * 1024)
#define QED_IWARP_RCV_WND_SIZE_MIN	(64 * 1024)
#define QED_IWARP_TS_EN			BIT(0)
#define QED_IWARP_PARAM_CRC_NEEDED	(1)
#define QED_IWARP_PARAM_P2P		(1)

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code);

/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
	dev->max_qp = min_t(u32,
			    IWARP_MAX_QPS,
			    p_hwfn->p_rdma_info->num_qps);

	dev->max_cq = dev->max_qp;

	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}

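/* Enable the parser's TCP search so iWARP traffic is classified for RDMA,
 * and record that RDMA searching is now enabled in the PRS block.
 */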
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
	p_hwfn->b_rdma_enabled_in_prs = true;
}

static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

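/* Allocate a cid from the RDMA cid bitmap under the rdma_info lock, convert
 * it to a protocol-global cid and reserve an ILT line for its context.
 */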
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
		return rc;
	}
	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
	if (rc)
		qed_iwarp_cid_cleaned(p_hwfn, *cid);

	return rc;
}

int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
			struct qed_rdma_qp *qp,
			struct qed_rdma_create_qp_out_params *out_params)
{
	struct iwarp_create_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 physical_queue;
	u32 cid;
	int rc;

	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      IWARP_SHARED_QUEUE_PAGE_SIZE,
					      &qp->shared_queue_phys_addr,
					      GFP_KERNEL);
	if (!qp->shared_queue)
		return -ENOMEM;

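	/* The SQ and RQ PBLs live at fixed offsets inside the single shared
	 * queue page; expose their virtual and physical addresses to the
	 * caller.
	 */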
	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
				  IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
				  IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
				  IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
				  IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		goto err1;

	qp->icid = (u16)cid;

	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.cid = qp->icid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_CREATE_QP,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		goto err2;

	p_ramrod = &p_ent->ramrod.iwarp_create_qp;

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	p_ramrod->pd = qp->pd;
	p_ramrod->sq_num_pages = qp->sq_num_pages;
	p_ramrod->rq_num_pages = qp->rq_num_pages;

	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);

	p_ramrod->cq_cid_for_sq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
	p_ramrod->cq_cid_for_rq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

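	/* Bind the QP to the offload and ack physical queues */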
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err2;

	return rc;

err2:
	qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  IWARP_SHARED_QUEUE_PAGE_SIZE,
			  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}

static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct iwarp_modify_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
		  0x1);
	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
	else
		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}

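/* Map a RoCE-style QP state onto the nearest iWARP QP state */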
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
	switch (state) {
	case QED_ROCE_QP_STATE_RESET:
	case QED_ROCE_QP_STATE_INIT:
	case QED_ROCE_QP_STATE_RTR:
		return QED_IWARP_QP_STATE_IDLE;
	case QED_ROCE_QP_STATE_RTS:
		return QED_IWARP_QP_STATE_RTS;
	case QED_ROCE_QP_STATE_SQD:
		return QED_IWARP_QP_STATE_CLOSING;
	case QED_ROCE_QP_STATE_ERR:
		return QED_IWARP_QP_STATE_ERROR;
	case QED_ROCE_QP_STATE_SQE:
		return QED_IWARP_QP_STATE_TERMINATE;
	default:
		return QED_IWARP_QP_STATE_ERROR;
	}
}

static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
	switch (state) {
	case QED_IWARP_QP_STATE_IDLE:
		return QED_ROCE_QP_STATE_INIT;
	case QED_IWARP_QP_STATE_RTS:
		return QED_ROCE_QP_STATE_RTS;
	case QED_IWARP_QP_STATE_TERMINATE:
		return QED_ROCE_QP_STATE_SQE;
	case QED_IWARP_QP_STATE_CLOSING:
		return QED_ROCE_QP_STATE_SQD;
	case QED_IWARP_QP_STATE_ERROR:
		return QED_ROCE_QP_STATE_ERR;
	default:
		return QED_ROCE_QP_STATE_ERR;
	}
}

static const char * const iwarp_state_names[] = {
	"IDLE",
	"RTS",
	"TERMINATE",
	"CLOSING",
	"ERROR",
};

int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_qp *qp,
		    enum qed_iwarp_qp_state new_state, bool internal)
{
	enum qed_iwarp_qp_state prev_iw_state;
	bool modify_fw = false;
	int rc = 0;

	/* modify QP can be called from upper-layer or as a result of async
	 * RST/FIN... therefore need to protect
	 */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	prev_iw_state = qp->iwarp_state;

	if (prev_iw_state == new_state) {
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
		return 0;
	}

	switch (prev_iw_state) {
	case QED_IWARP_QP_STATE_IDLE:
		switch (new_state) {
		case QED_IWARP_QP_STATE_RTS:
			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			if (!internal)
				modify_fw = true;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_RTS:
		switch (new_state) {
		case QED_IWARP_QP_STATE_CLOSING:
			if (!internal)
				modify_fw = true;

			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			if (!internal)
				modify_fw = true;
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_ERROR:
		switch (new_state) {
		case QED_IWARP_QP_STATE_IDLE:

			qp->iwarp_state = new_state;
			break;
		case QED_IWARP_QP_STATE_CLOSING:
			/* could happen due to race... do nothing.... */
			break;
		default:
			rc = -EINVAL;
		}
		break;
	case QED_IWARP_QP_STATE_TERMINATE:
	case QED_IWARP_QP_STATE_CLOSING:
		qp->iwarp_state = new_state;
		break;
	default:
		break;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
		   qp->icid,
		   iwarp_state_names[prev_iw_state],
		   iwarp_state_names[qp->iwarp_state],
		   internal ? " (internal)" : "");

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

	if (modify_fw)
		rc = qed_iwarp_modify_fw(p_hwfn, qp);

	return rc;
}

int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}

int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	int rc = 0;

	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
		rc = qed_iwarp_modify_qp(p_hwfn, qp,
					 QED_IWARP_QP_STATE_ERROR, false);
		if (rc)
			return rc;
	}

	rc = qed_iwarp_fw_destroy(p_hwfn, qp);

	if (qp->shared_queue)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  IWARP_SHARED_QUEUE_PAGE_SIZE,
				  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}

#define QED_IWARP_MAX_CID_CLEAN_TIME  100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5

/* This function waits for all the bits of a bmap to be cleared. As long as
 * there is progress (i.e. the number of bits left to be cleared decreases),
 * the function keeps waiting.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
	int prev_weight = 0;
	int wait_count = 0;
	int weight = 0;

	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	prev_weight = weight;

	while (weight) {
		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

		weight = bitmap_weight(bmap->bitmap, bmap->max_count);

		if (prev_weight == weight) {
			wait_count++;
		} else {
			prev_weight = weight;
			wait_count = 0;
		}

		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
			DP_NOTICE(p_hwfn,
				  "%s bitmap wait timed out (%d cids pending)\n",
				  bmap->name, weight);
			return -EBUSY;
		}
	}
	return 0;
}

static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
	/* Now wait for all cids to be completed */
	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
					      &p_hwfn->p_rdma_info->cid_map);
}

int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
{
	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	return 0;
}

void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
}

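/* Post (or repost) a receive buffer to the given LL2 connection; if posting
 * fails, free the buffer here since it will not be returned via a callback.
 */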
static int
qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_ll2_buff *buf, u8 handle)
{
	int rc;

	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
				    (u16)buf->buff_size, buf, 1);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
			  rc, handle);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
				  buf->data, buf->data_phys_addr);
		kfree(buf);
	}

	return rc;
}

static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_iwarp_ll2_buff *buf = data->cookie;
	struct qed_hwfn *p_hwfn = cxt;

	if (GET_FIELD(data->parse_flags,
		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
		goto err;
	}

	/* Process SYN packet - added later on in series */

err:
	qed_iwarp_ll2_post_rx(p_hwfn, buf,
			      p_hwfn->p_rdma_info->iwarp.ll2_syn_handle);
}

static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t rx_buf_addr,
				     bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
}

static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
				      void *cookie, dma_addr_t first_frag_addr,
				      bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	/* this was originally an rx packet, post it back */
	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
}

static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	if (!buffer)
		return;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);

	kfree(buffer);
}

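/* Terminate and release the SYN LL2 connection (if it was opened) and remove
 * the MAC filter that was added for iWARP traffic.
 */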
static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	int rc = 0;

	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_syn_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	}

	qed_llh_remove_mac_filter(p_hwfn,
				  p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
	return rc;
}

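/* Allocate num_rx_bufs DMA-coherent buffers of buff_size bytes each and post
 * them as receive buffers on the given LL2 connection.
 */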
static int
qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
			    int num_rx_bufs, int buff_size, u8 ll2_handle)
{
	struct qed_iwarp_ll2_buff *buffer;
	int rc = 0;
	int i;

	for (i = 0; i < num_rx_bufs; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			rc = -ENOMEM;
			break;
		}

		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  buff_size,
						  &buffer->data_phys_addr,
						  GFP_KERNEL);
		if (!buffer->data) {
			kfree(buffer);
			rc = -ENOMEM;
			break;
		}

		buffer->buff_size = buff_size;
		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
		if (rc)
			/* buffers will be deallocated by qed_ll2 */
			break;
	}
	return rc;
}

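/* Worst-case rx buffer size: MTU plus Ethernet header, two VLAN tags, two
 * bytes of padding and an extra cache line, aligned to the cache-line size.
 */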
#define QED_IWARP_MAX_BUF_SIZE(mtu) \
	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
	      ETH_CACHE_LINE_SIZE)

static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params,
		    struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info;
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	int rc = 0;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;

	iwarp_info->max_mtu = params->max_mtu;

	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);

	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
	if (rc)
		return rc;

	/* Start SYN connection */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
	cbs.cookie = p_hwfn;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
	data.input.tx_tc = PKT_LB_TC;
	data.input.tx_dest = QED_LL2_TX_DEST_LB;
	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
		qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
		goto err;
	}

	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 QED_IWARP_LL2_SYN_RX_SIZE,
					 QED_IWARP_MAX_SYN_PKT_SIZE,
					 iwarp_info->ll2_syn_handle);
	if (rc)
		goto err;

	return rc;
err:
	qed_iwarp_ll2_stop(p_hwfn, p_ptt);

	return rc;
}

int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		    struct qed_rdma_start_in_params *params)
{
	struct qed_iwarp_info *iwarp_info;
	u32 rcv_wnd_size;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
	rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;

	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;

	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;

	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
				  qed_iwarp_async_event);

	return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
}

int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
	if (rc)
		return rc;

	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);

	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}

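/* Async EQ events for PROTOCOLID_IWARP arrive here; they are not handled yet */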
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code)
{
	return 0;
}

void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
		   struct qed_rdma_query_qp_out_params *out_params)
{
	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
}