/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

#define QED_IWARP_ORD_DEFAULT 32
#define QED_IWARP_IRD_DEFAULT 32
#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024)
#define QED_IWARP_TS_EN BIT(0)
#define QED_IWARP_PARAM_CRC_NEEDED (1)
#define QED_IWARP_PARAM_P2P (1)

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
                                 u8 fw_event_code, u16 echo,
                                 union event_ring_data *data,
                                 u8 fw_return_code);

/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

        dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
        dev->max_qp = min_t(u32,
                            IWARP_MAX_QPS,
                            p_hwfn->p_rdma_info->num_qps);

        dev->max_cq = dev->max_qp;

        dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
        dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}

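/* iWARP traffic is TCP-based; enable the parser's TCP search register and
 * record that RDMA is enabled in the PRS block.
 */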
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
        p_hwfn->b_rdma_enabled_in_prs = true;
}

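/* Release a cid back to the RDMA cid bitmap. The cid is first converted to
 * an index relative to the protocol's cid range.
 */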
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
        cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

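/* Allocate a cid from the RDMA cid bitmap and make sure its context element
 * is backed by an ILT page; the cid is released again on failure.
 */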
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
                return rc;
        }
        *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
        if (rc)
                qed_iwarp_cid_cleaned(p_hwfn, *cid);

        return rc;
}

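/* Allocate the shared queue page and a cid for the QP, then post a
 * CREATE_QP ramrod describing it to the firmware.
 */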
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
                        struct qed_rdma_qp *qp,
                        struct qed_rdma_create_qp_out_params *out_params)
{
        struct iwarp_create_qp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u16 physical_queue;
        u32 cid;
        int rc;

        qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                              IWARP_SHARED_QUEUE_PAGE_SIZE,
                                              &qp->shared_queue_phys_addr,
                                              GFP_KERNEL);
        if (!qp->shared_queue)
                return -ENOMEM;

        out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
                                  IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
        out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
                                  IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
        out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
                                  IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
        out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
                                  IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

        rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
        if (rc)
                goto err1;

        qp->icid = (u16)cid;

        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.cid = qp->icid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_CREATE_QP,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                goto err2;

        p_ramrod = &p_ent->ramrod.iwarp_create_qp;

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
                  qp->fmr_and_reserved_lkey);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

        p_ramrod->pd = qp->pd;
        p_ramrod->sq_num_pages = qp->sq_num_pages;
        p_ramrod->rq_num_pages = qp->rq_num_pages;

        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);

        p_ramrod->cq_cid_for_sq =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
        p_ramrod->cq_cid_for_rq =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

        p_ramrod->dpi = cpu_to_le16(qp->dpi);

        physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
        p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
        physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
        p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err2;

        return rc;

err2:
        qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          IWARP_SHARED_QUEUE_PAGE_SIZE,
                          qp->shared_queue, qp->shared_queue_phys_addr);

        return rc;
}

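/* Post a MODIFY_QP ramrod; the firmware is only asked to move the QP to
 * CLOSING or ERROR.
 */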
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct iwarp_modify_qp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_MODIFY_QP,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
        SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
                  0x1);
        if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
                p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
        else
                p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc);

        return rc;
}

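/* Map the common RoCE QP states onto their iWARP equivalents */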
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
        switch (state) {
        case QED_ROCE_QP_STATE_RESET:
        case QED_ROCE_QP_STATE_INIT:
        case QED_ROCE_QP_STATE_RTR:
                return QED_IWARP_QP_STATE_IDLE;
        case QED_ROCE_QP_STATE_RTS:
                return QED_IWARP_QP_STATE_RTS;
        case QED_ROCE_QP_STATE_SQD:
                return QED_IWARP_QP_STATE_CLOSING;
        case QED_ROCE_QP_STATE_ERR:
                return QED_IWARP_QP_STATE_ERROR;
        case QED_ROCE_QP_STATE_SQE:
                return QED_IWARP_QP_STATE_TERMINATE;
        default:
                return QED_IWARP_QP_STATE_ERROR;
        }
}

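/* Map iWARP QP states back onto the common RoCE QP states */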
static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
        switch (state) {
        case QED_IWARP_QP_STATE_IDLE:
                return QED_ROCE_QP_STATE_INIT;
        case QED_IWARP_QP_STATE_RTS:
                return QED_ROCE_QP_STATE_RTS;
        case QED_IWARP_QP_STATE_TERMINATE:
                return QED_ROCE_QP_STATE_SQE;
        case QED_IWARP_QP_STATE_CLOSING:
                return QED_ROCE_QP_STATE_SQD;
        case QED_IWARP_QP_STATE_ERROR:
                return QED_ROCE_QP_STATE_ERR;
        default:
                return QED_ROCE_QP_STATE_ERR;
        }
}

const char *iwarp_state_names[] = {
        "IDLE",
        "RTS",
        "TERMINATE",
        "CLOSING",
        "ERROR",
};

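/* Drive the iWARP QP state machine. Firmware is notified only for
 * transitions requested by the upper layer (internal == false) that
 * require it.
 */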
int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
                    struct qed_rdma_qp *qp,
                    enum qed_iwarp_qp_state new_state, bool internal)
{
        enum qed_iwarp_qp_state prev_iw_state;
        bool modify_fw = false;
        int rc = 0;

        /* modify QP can be called from upper-layer or as a result of async
         * RST/FIN... therefore need to protect
         */
        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
        prev_iw_state = qp->iwarp_state;

        if (prev_iw_state == new_state) {
                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
                return 0;
        }

        switch (prev_iw_state) {
        case QED_IWARP_QP_STATE_IDLE:
                switch (new_state) {
                case QED_IWARP_QP_STATE_RTS:
                        qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
                        break;
                case QED_IWARP_QP_STATE_ERROR:
                        qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
                        if (!internal)
                                modify_fw = true;
                        break;
                default:
                        break;
                }
                break;
        case QED_IWARP_QP_STATE_RTS:
                switch (new_state) {
                case QED_IWARP_QP_STATE_CLOSING:
                        if (!internal)
                                modify_fw = true;

                        qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
                        break;
                case QED_IWARP_QP_STATE_ERROR:
                        if (!internal)
                                modify_fw = true;
                        qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
                        break;
                default:
                        break;
                }
                break;
        case QED_IWARP_QP_STATE_ERROR:
                switch (new_state) {
                case QED_IWARP_QP_STATE_IDLE:
                        qp->iwarp_state = new_state;
                        break;
                case QED_IWARP_QP_STATE_CLOSING:
                        /* could happen due to race... do nothing.... */
                        break;
                default:
                        rc = -EINVAL;
                }
                break;
        case QED_IWARP_QP_STATE_TERMINATE:
        case QED_IWARP_QP_STATE_CLOSING:
                qp->iwarp_state = new_state;
                break;
        default:
                break;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
                   qp->icid,
                   iwarp_state_names[prev_iw_state],
                   iwarp_state_names[qp->iwarp_state],
                   internal ? "internal" : "");

        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

        if (modify_fw)
                rc = qed_iwarp_modify_fw(p_hwfn, qp);

        return rc;
}

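/* Post a DESTROY_QP ramrod for this QP's cid */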
int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_DESTROY_QP,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

        return rc;
}

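/* Move the QP to ERROR if needed, destroy it in the firmware and free the
 * shared queue page.
 */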
int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        int rc = 0;

        if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
                rc = qed_iwarp_modify_qp(p_hwfn, qp,
                                         QED_IWARP_QP_STATE_ERROR, false);
                if (rc)
                        return rc;
        }

        rc = qed_iwarp_fw_destroy(p_hwfn, qp);

        if (qp->shared_queue)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  IWARP_SHARED_QUEUE_PAGE_SIZE,
                                  qp->shared_queue, qp->shared_queue_phys_addr);

        return rc;
}

#define QED_IWARP_MAX_CID_CLEAN_TIME 100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5

/* Wait for all the bits of a bmap to be cleared. As long as there is
 * progress (i.e. the number of bits left to be cleared keeps decreasing),
 * the function keeps waiting; otherwise it gives up after a bounded number
 * of unchanged polls.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
        int prev_weight = 0;
        int wait_count = 0;
        int weight = 0;

        weight = bitmap_weight(bmap->bitmap, bmap->max_count);
        prev_weight = weight;

        while (weight) {
                msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

                weight = bitmap_weight(bmap->bitmap, bmap->max_count);

                if (prev_weight == weight) {
                        wait_count++;
                } else {
                        prev_weight = weight;
                        wait_count = 0;
                }

                if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
                        DP_NOTICE(p_hwfn,
                                  "%s bitmap wait timed out (%d cids pending)\n",
                                  bmap->name, weight);
                        return -EBUSY;
                }
        }
        return 0;
}

static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
        /* Now wait for all cids to be completed */
        return qed_iwarp_wait_cid_map_cleared(p_hwfn,
                                              &p_hwfn->p_rdma_info->cid_map);
}

int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
{
        spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        return 0;
}

void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
}

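/* Set the iWARP-specific TCP/MPA parameters and register the async event
 * callback with the slow path queue.
 */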
int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                    struct qed_rdma_start_in_params *params)
{
        struct qed_iwarp_info *iwarp_info;
        u32 rcv_wnd_size;
        int rc = 0;

        iwarp_info = &p_hwfn->p_rdma_info->iwarp;

        iwarp_info->tcp_flags = QED_IWARP_TS_EN;
        rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;

        /* a rcv_wnd_scale of 0 corresponds to QED_IWARP_RCV_WND_SIZE_MIN */
        iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
            ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
        iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
        iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;

        iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;

        spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);

        qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
                                  qed_iwarp_async_event);

        return rc;
}

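/* Wait for all cids to be released before unregistering the async callback */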
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        int rc;

        rc = qed_iwarp_wait_for_all_cids(p_hwfn);
        if (rc)
                return rc;

        qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);

        return 0;
}

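/* Async events are not handled yet; this is a stub for the SPQ callback */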
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
                                 u8 fw_event_code, u16 echo,
                                 union event_ring_data *data,
                                 u8 fw_return_code)
{
        return 0;
}

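/* Report the QP state translated to the common RoCE state enum */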
void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
                   struct qed_rdma_query_qp_out_params *out_params)
{
        out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
}