/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/prefetch.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"QPLIB: FP: Adding to SQ Flush list = %p",
			qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"QPLIB: FP: Adding to RQ Flush list = %p",
				qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

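/* Take the hwq locks of both CQs attached to a QP. When the QP uses the
 * same CQ for send and receive only one lock is really taken; the
 * __acquire()/__release() annotations keep sparse's lock balance checking
 * happy.
 */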
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags)
	__acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
{
	spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->hwq.lock);
	else
		spin_lock(&qp->rcq->hwq.lock);
}

void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags)
	__releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->hwq.lock);
	else
		spin_unlock(&qp->rcq->hwq.lock);
	spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
}

static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
						      struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_cq *buddy_cq = NULL;

	if (qp->scq == qp->rcq)
		buddy_cq = NULL;
	else if (qp->scq == cq)
		buddy_cq = qp->rcq;
	else
		buddy_cq = qp->scq;
	return buddy_cq;
}

static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_cq *cq)
	__acquires(&buddy_cq->hwq.lock)
{
	struct bnxt_qplib_cq *buddy_cq = NULL;

	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
	if (!buddy_cq)
		__acquire(&cq->hwq.lock);
	else
		spin_lock(&buddy_cq->hwq.lock);
}

static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_cq *cq)
	__releases(&buddy_cq->hwq.lock)
{
	struct bnxt_qplib_cq *buddy_cq = NULL;

	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
	if (!buddy_cq)
		__release(&cq->hwq.lock);
	else
		spin_unlock(&buddy_cq->hwq.lock);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_locks(qp, &flags);
}

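/* Work handler scheduled from the post_send/post_recv error paths: if the
 * CQ is still armed, invoke the registered CQN handler so the consumer
 * polls the flush completions.
 */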
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->hwq.max_elements * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

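/* Allocate the per-WQE DMA-coherent header buffers that back
 * bnxt_qplib_get_qp1_sq_buf()/bnxt_qplib_get_qp1_rq_buf(); one slot of
 * sq_hdr_buf_size/rq_hdr_buf_size is reserved per queue element.
 */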
static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->hwq.max_elements *
					qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"QPLIB: Failed to create sq_hdr_buf");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->hwq.max_elements *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"QPLIB: Failed to create rq_hdr_buf");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

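/* Tasklet handler that drains the notification queue (NQ): CQ notification
 * entries re-enable CQ arming and are handed to the registered CQN handler,
 * then the NQ consumer index is written back and the NQ doorbell re-armed.
 */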
static void bnxt_qplib_service_nq(unsigned long data)
{
	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	struct bnxt_qplib_cq *cq;
	int num_cqne_processed = 0;
	u32 sw_cons, raw_cons;
	u16 type;
	int budget = nq->budget;
	u64 q_handle;

	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			bnxt_qplib_arm_cq_enable(cq);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (!nq->cqn_handler(nq, (cq)))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "QPLIB: cqn - type 0x%x not handled",
					 type);
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "QPLIB: nqe with type = 0x%x not handled",
				 type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
	}
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base **nq_ptr;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
	prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->worker);

	return IRQ_HANDLED;
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}
	/* Make sure the HW is stopped! */
	synchronize_irq(nq->vector);
	tasklet_disable(&nq->worker);
	tasklet_kill(&nq->worker);

	if (nq->requested) {
		free_irq(nq->vector, nq);
		nq->requested = false;
	}
	if (nq->bar_reg_iomem)
		iounmap(nq->bar_reg_iomem);
	nq->bar_reg_iomem = NULL;

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->vector = 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int msix_vector, int bar_reg_offset,
			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
					    struct bnxt_qplib_cq *),
			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
					     void *, u8 event))
{
	resource_size_t nq_base;
	int rc = -1;

	nq->pdev = pdev;
	nq->vector = msix_vector;

	nq->cqn_handler = cqn_handler;

	nq->srqn_handler = srqn_handler;

	tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		goto fail;

	nq->requested = false;
	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, "bnxt_qplib_nq", nq);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request IRQ for NQ: %#x", rc);
		bnxt_qplib_disable_nq(nq);
		goto fail;
	}
	nq->requested = true;
	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
	nq->bar_reg_off = bar_reg_offset;
	nq_base = pci_resource_start(pdev, nq->bar_reg);
	if (!nq_base) {
		rc = -ENOMEM;
		goto fail;
	}
	nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
	if (!nq->bar_reg_iomem) {
		rc = -ENOMEM;
		goto fail;
	}
	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements)
		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
}

int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
{
	nq->pdev = pdev;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
				      &nq->hwq.max_elements,
				      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
				      PAGE_SIZE, HWQ_TYPE_L2_CMPL))
		return -ENOMEM;

	nq->budget = 8;
	return 0;
}

/* QP */
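/* Create the special QP1 (GSI) queue pair: allocate the SQ/RQ hardware
 * queues and shadow swq arrays, issue the CREATE_QP1 firmware command and
 * record the returned id in the RCFW QP table.
 */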
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_qp1 req;
	struct creq_create_qp1_resp resp;
	struct bnxt_qplib_pbl *pbl;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	int rc;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	sq->hwq.max_elements = sq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
				       &sq->hwq.max_elements,
				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
	if (!sq->swq) {
		rc = -ENOMEM;
		goto fail_sq;
	}
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.sq_pg_size_sq_lvl =
		((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
				<< CMDQ_CREATE_QP1_SQ_LVL_SFT) |
		(pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
		 pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
		 pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
		 pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
		 pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
		 pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
		 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);

	if (qp->scq)
		req.scq_cid = cpu_to_le32(qp->scq->id);

	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;

	/* RQ */
	if (rq->max_wqe) {
		rq->hwq.max_elements = qp->rq.max_wqe;
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
					       &rq->hwq.max_elements,
					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
					       PAGE_SIZE, HWQ_TYPE_QUEUE);
		if (rc)
			goto fail_sq;

		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
				  GFP_KERNEL);
		if (!rq->swq) {
			rc = -ENOMEM;
			goto fail_rq;
		}
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		req.rq_pg_size_rq_lvl =
			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
					CMDQ_CREATE_QP1_RQ_LVL_SFT) |
			(pbl->pg_size == ROCE_PG_SIZE_4K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
			 pbl->pg_size == ROCE_PG_SIZE_8K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
			 pbl->pg_size == ROCE_PG_SIZE_64K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
			 pbl->pg_size == ROCE_PG_SIZE_2M ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
			 pbl->pg_size == ROCE_PG_SIZE_8M ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
			 pbl->pg_size == ROCE_PG_SIZE_1G ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
			 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
		if (qp->rcq)
			req.rcq_cid = cpu_to_le32(qp->rcq->id);
	}

	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto fail;
	}
	req.qp_flags = cpu_to_le32(qp_flags);
	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
	req.rq_size = cpu_to_le32(rq->hwq.max_elements);

	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.rq_fwo_rq_sge =
		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_RQ_SGE_SFT);

	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	rcfw->qp_tbl[qp->id].qp_id = qp->id;
	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
	kfree(rq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
	kfree(sq->swq);
exit:
	return rc;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
	struct cmdq_create_qp req;
	struct creq_create_qp_resp resp;
	struct bnxt_qplib_pbl *pbl;
	struct sq_psn_search **psn_search_ptr;
	unsigned long int psn_search, poff = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_hwq *xrrq;
	int i, rc, req_size, psn_sz;
	u16 cmd_flags = 0, max_ssge;
	u32 sw_prod, qp_flags = 0;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
		 sizeof(struct sq_psn_search) : 0;
	sq->hwq.max_elements = sq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
				       sq->nmap, &sq->hwq.max_elements,
				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
				       psn_sz,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
	if (!sq->swq) {
		rc = -ENOMEM;
		goto fail_sq;
	}
	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	if (psn_sz) {
		psn_search_ptr = (struct sq_psn_search **)
				  &hw_sq_send_ptr[get_sqe_pg
					(sq->hwq.max_elements)];
		psn_search = (unsigned long int)
			      &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
			      [get_sqe_idx(sq->hwq.max_elements)];
		if (psn_search & ~PAGE_MASK) {
			/* If the psn_search does not start on a page boundary,
			 * then calculate the offset
			 */
			poff = (psn_search & ~PAGE_MASK) /
				BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
		}
		for (i = 0; i < sq->hwq.max_elements; i++)
			sq->swq[i].psn_search =
				&psn_search_ptr[get_psne_pg(i + poff)]
					       [get_psne_idx(i + poff)];
	}
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.sq_pg_size_sq_lvl =
		((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
				 << CMDQ_CREATE_QP_SQ_LVL_SFT) |
		(pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
		 pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
		 pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
		 pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
		 pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
		 pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
		 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);

	/* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
		hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
						[get_sqe_idx(sw_prod)];
		hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
	}

	if (qp->scq)
		req.scq_cid = cpu_to_le32(qp->scq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;

	/* RQ */
	if (rq->max_wqe) {
		rq->hwq.max_elements = rq->max_wqe;
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
					       rq->nmap, &rq->hwq.max_elements,
					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
					       PAGE_SIZE, HWQ_TYPE_QUEUE);
		if (rc)
			goto fail_sq;

		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
				  GFP_KERNEL);
		if (!rq->swq) {
			rc = -ENOMEM;
			goto fail_rq;
		}
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		req.rq_pg_size_rq_lvl =
			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
				CMDQ_CREATE_QP_RQ_LVL_SFT) |
			(pbl->pg_size == ROCE_PG_SIZE_4K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
			 pbl->pg_size == ROCE_PG_SIZE_8K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
			 pbl->pg_size == ROCE_PG_SIZE_64K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
			 pbl->pg_size == ROCE_PG_SIZE_2M ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
			 pbl->pg_size == ROCE_PG_SIZE_8M ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
			 pbl->pg_size == ROCE_PG_SIZE_1G ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
			 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
	}

	if (qp->rcq)
		req.rcq_cid = cpu_to_le32(qp->rcq->id);
	req.qp_flags = cpu_to_le32(qp_flags);
	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf = NULL;

	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc)
		goto fail_rq;

	/* CTRL-22434: Irrespective of the requested SGE count on the SQ
	 * always create the QP with max send sges possible if the requested
	 * inline size is greater than 0.
	 */
	max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
	req.sq_fwo_sq_sge = cpu_to_le16(
				((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
				 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.rq_fwo_rq_sge = cpu_to_le16(
				((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
				 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
					       &xrrq->max_elements,
					       BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
					       0, req_size, HWQ_TYPE_CTX);
		if (rc)
			goto fail_buf_free;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);

		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
					       &xrrq->max_elements,
					       BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
					       0, req_size, HWQ_TYPE_CTX);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	rcfw->qp_tbl[qp->id].qp_id = qp->id;
	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;

	return 0;

fail:
	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
fail_orrq:
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
fail_buf_free:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
	kfree(rq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
	kfree(sq->swq);
exit:
	return rc;
}

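/* The following helpers trim qp->modify_flags down to what the firmware
 * accepts for the current-state to new-state transition being requested.
 */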
static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp resp;
	u16 cmd_flags = 0, pkey;
	u32 temp32[4];
	u32 bmask;
	int rc;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
					 qp->pkey_index, &pkey))
			req.pkey = cpu_to_le16(pkey);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_qp req;
	struct creq_query_qp_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp32[4];
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
	unsigned long flags;
	u16 cmd_flags = 0;
	int rc;

	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[qp->id].qp_handle = NULL;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc) {
		rcfw->qp_tbl[qp->id].qp_id = qp->id;
		rcfw->qp_tbl[qp->id].qp_handle = qp;
		return rc;
	}

	/* Must walk the associated CQs to nullify the QP ptr */
	spin_lock_irqsave(&qp->scq->hwq.lock, flags);

	__clean_cq(qp->scq, (u64)(unsigned long)qp);

	if (qp->rcq && qp->rcq != qp->scq) {
		spin_lock(&qp->rcq->hwq.lock);
		__clean_cq(qp->rcq, (u64)(unsigned long)qp);
		spin_unlock(&qp->rcq->hwq.lock);
	}

	spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);

	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);

	return 0;
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_prod;

	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);

	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_SQ);
	/* Flush all the WQE writes to HW */
	wmb();
	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}

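/* Post one send WQE. If the QP is in the error state the WQE is only
 * recorded in the software queue and a work item is queued so a flush
 * completion can be generated for it later.
 */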
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_swq *swq;
	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
	struct sq_sge *hw_sge;
	struct bnxt_qplib_nq_work *nq_work = NULL;
	bool sch_handler = false;
	u32 sw_prod;
	u8 wqe_size16;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	__le32 temp32;

	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
		if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
			sch_handler = true;
			dev_dbg(&sq->hwq.pdev->dev,
				"%s Error QP. Scheduling for poll_cq\n",
				__func__);
			goto queue_err;
		}
	}

	if (bnxt_qplib_queue_full(sq)) {
		dev_err(&sq->hwq.pdev->dev,
			"QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
			sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	swq = &sq->swq[sw_prod];
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
	swq->start_psn = sq->psn & BTH_PSN_MASK;

	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
					[get_sqe_idx(sw_prod)];

	memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		/* Copy the inline data */
		if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			dev_warn(&sq->hwq.pdev->dev,
				 "QPLIB: Inline data length > 96 detected");
			data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
		} else {
			data_len = wqe->inline_len;
		}
		memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
		wqe_size16 = (data_len + 15) >> 4;
	} else {
		for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
		     i < wqe->num_sge; i++, hw_sge++) {
			hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
			hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
			hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
			data_len += wqe->sg_list[i].size;
		}
		/* Each SGE entry = 1 WQE size16 */
		wqe_size16 = wqe->num_sge;
		/* HW requires the WQE size to have room for at least one SGE
		 * even if none was supplied by the ULP
		 */
		if (!wqe->num_sge)
			wqe_size16++;
	}

	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			/* Assemble info for Raw Ethertype QPs */
			struct sq_send_raweth_qp1 *sqe =
				(struct sq_send_raweth_qp1 *)hw_sq_send_hdr;

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		/* else, just fall thru */
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->inv_key_or_imm_data = cpu_to_le32(
						wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->dst_qp = cpu_to_le32(
					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
			sqe->length = cpu_to_le32(data_len);
			sqe->avid = cpu_to_le32(wqe->send.avid &
						SQ_SEND_AVID_MASK);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
		} else {
			sqe->length = cpu_to_le32(data_len);
			sqe->dst_qp = 0;
			sqe->avid = 0;
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe =
			(struct sq_localinvalidate *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		sqe->va = cpu_to_le64(wqe->bind.va);
		temp32 = cpu_to_le32(wqe->bind.length);
		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	if (swq->psn_search) {
		swq->psn_search->opcode_start_psn = cpu_to_le32(
			((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
			 SQ_PSN_SEARCH_START_PSN_MASK) |
			((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
			 SQ_PSN_SEARCH_OPCODE_MASK));
		swq->psn_search->flags_next_psn = cpu_to_le32(
			((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
			 SQ_PSN_SEARCH_NEXT_PSN_MASK));
	}
queue_err:
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		swq = &sq->swq[sw_prod];
		swq->wr_id = wqe->wr_id;
		swq->type = wqe->type;
		swq->flags = wqe->flags;
		if (qp->sig_type)
			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
		swq->start_psn = sq->psn & BTH_PSN_MASK;
	}
	sq->hwq.prod++;
	qp->wqe_cnt++;

done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&sq->hwq.pdev->dev,
				"QPLIB: FP: Failed to allocate SQ nq_work!");
			rc = -ENOMEM;
		}
	}
	return rc;
}

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_prod;

	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_RQ);

	/* Flush the writes to the HW Rx WQE before ringing the Rx DB */
	wmb();
	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}

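/* Post one receive WQE; mirrors bnxt_qplib_post_send(), including the
 * error-state flush handling.
 */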
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe *rqe, **rqe_ptr;
	struct sq_sge *hw_sge;
	struct bnxt_qplib_nq_work *nq_work = NULL;
	bool sch_handler = false;
	u32 sw_prod;
	int i, rc = 0;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&rq->hwq.pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n",
			__func__);
		goto queue_err;
	}
	if (bnxt_qplib_queue_full(rq)) {
		dev_err(&rq->hwq.pdev->dev,
			"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
		rc = -EINVAL;
		goto done;
	}
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	rq->swq[sw_prod].wr_id = wqe->wr_id;

	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];

	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);

	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	rqe->wqe_type = wqe->type;
	rqe->flags = wqe->flags;
	rqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*rqe), data) + 15) >> 4);
	/* HW requires the WQE size to have room for at least one SGE even if
	 * none was supplied by the ULP
	 */
	if (!wqe->num_sge)
		rqe->wqe_size++;

	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
	rqe->wr_id[0] = cpu_to_le32(sw_prod);

queue_err:
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
		rq->swq[sw_prod].wr_id = wqe->wr_id;
	}

	rq->hwq.prod++;
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&rq->hwq.pdev->dev,
				"QPLIB: FP: Failed to allocate RQ nq_work!");
			rc = -ENOMEM;
		}
	}
done:
	return rc;
}

/* CQ */

/* Spinlock must be held */
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
{
	struct dbr_dbr db_msg = { 0 };

	db_msg.type_xid =
		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_CQ_ARMENA);
	/* Flush memory writes before enabling the CQ */
	wmb();
	__iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
}

static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_cons;

	/* Ring DB */
	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
	db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    arm_type);
	/* flush memory writes before arming the CQ */
	wmb();
	__iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}

1653int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1654{
1655 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1656 struct cmdq_create_cq req;
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001657 struct creq_create_cq_resp resp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001658 struct bnxt_qplib_pbl *pbl;
1659 u16 cmd_flags = 0;
1660 int rc;
1661
1662 cq->hwq.max_elements = cq->max_wqe;
1663 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
1664 cq->nmap, &cq->hwq.max_elements,
1665 BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1666 PAGE_SIZE, HWQ_TYPE_QUEUE);
1667 if (rc)
1668 goto exit;
1669
1670 RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
1671
1672 if (!cq->dpi) {
1673 dev_err(&rcfw->pdev->dev,
1674 "QPLIB: FP: CREATE_CQ failed due to NULL DPI");
1675 return -EINVAL;
1676 }
1677 req.dpi = cpu_to_le32(cq->dpi->dpi);
1678 req.cq_handle = cpu_to_le64(cq->cq_handle);
1679
1680 req.cq_size = cpu_to_le32(cq->hwq.max_elements);
1681 pbl = &cq->hwq.pbl[PBL_LVL_0];
1682 req.pg_size_lvl = cpu_to_le32(
1683 ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1684 CMDQ_CREATE_CQ_LVL_SFT) |
1685 (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1686 pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1687 pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1688 pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1689 pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1690 pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1691 CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
1692
1693 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1694
1695 req.cq_fco_cnq_id = cpu_to_le32(
1696 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1697 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1698
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001699 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1700 (void *)&resp, NULL, 0);
1701 if (rc)
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001702 goto fail;
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001703
1704 cq->id = le32_to_cpu(resp.xid);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001705 cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1706 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1707 init_waitqueue_head(&cq->waitq);
Selvin Xavierf218d672017-06-29 12:28:15 -07001708 INIT_LIST_HEAD(&cq->sqf_head);
1709 INIT_LIST_HEAD(&cq->rqf_head);
1710 spin_lock_init(&cq->compl_lock);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001711
1712 bnxt_qplib_arm_cq_enable(cq);
1713 return 0;
1714
1715fail:
1716 bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1717exit:
1718 return rc;
1719}
1720
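/* Issue DESTROY_CQ to the firmware and free the CQ hardware queue. */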
1721int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1722{
1723 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1724 struct cmdq_destroy_cq req;
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001725 struct creq_destroy_cq_resp resp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001726 u16 cmd_flags = 0;
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001727 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001728
1729 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
1730
1731 req.cq_cid = cpu_to_le32(cq->id);
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001732 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1733 (void *)&resp, NULL, 0);
1734 if (rc)
1735 return rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001736 bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1737 return 0;
1738}
1739
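/* Complete all outstanding SQEs of a flushed QP with FLUSHED_ERR status,
 * skipping FENCE WQEs.  Returns -EAGAIN if *budget is exhausted before the
 * SQ is fully drained.
 */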
1740static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
1741 struct bnxt_qplib_cqe **pcqe, int *budget)
1742{
1743 u32 sw_prod, sw_cons;
1744 struct bnxt_qplib_cqe *cqe;
1745 int rc = 0;
1746
1747 /* Now complete all outstanding SQEs with FLUSHED_ERR */
1748 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1749 cqe = *pcqe;
1750 while (*budget) {
1751 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
1752		if (sw_cons == sw_prod)
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001753			break;
Selvin Xavierf218d672017-06-29 12:28:15 -07001755 /* Skip the FENCE WQE completions */
1756 if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
1757 bnxt_qplib_cancel_phantom_processing(qp);
1758 goto skip_compl;
1759 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001760 memset(cqe, 0, sizeof(*cqe));
1761 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
1762 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
1763 cqe->qp_handle = (u64)(unsigned long)qp;
1764 cqe->wr_id = sq->swq[sw_cons].wr_id;
1765 cqe->src_qp = qp->id;
1766 cqe->type = sq->swq[sw_cons].type;
1767 cqe++;
1768 (*budget)--;
Selvin Xavierf218d672017-06-29 12:28:15 -07001769skip_compl:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001770 sq->hwq.cons++;
1771 }
1772 *pcqe = cqe;
1773 if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
1774 /* Out of budget */
1775 rc = -EAGAIN;
1776
1777 return rc;
1778}
1779
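/* Complete all outstanding RQEs with FLUSHED_ERR, using the receive
 * completion opcode that matches the QP type (RC, UD or raw-Eth/QP1).
 * Returns -EAGAIN when out of budget.
 */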
1780static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
Selvin Xavierf218d672017-06-29 12:28:15 -07001781 struct bnxt_qplib_cqe **pcqe, int *budget)
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001782{
1783 struct bnxt_qplib_cqe *cqe;
1784 u32 sw_prod, sw_cons;
1785 int rc = 0;
Selvin Xavierf218d672017-06-29 12:28:15 -07001786 int opcode = 0;
1787
1788 switch (qp->type) {
1789 case CMDQ_CREATE_QP1_TYPE_GSI:
1790 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
1791 break;
1792 case CMDQ_CREATE_QP_TYPE_RC:
1793 opcode = CQ_BASE_CQE_TYPE_RES_RC;
1794 break;
1795 case CMDQ_CREATE_QP_TYPE_UD:
1796 opcode = CQ_BASE_CQE_TYPE_RES_UD;
1797 break;
1798 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001799
1800 /* Flush the rest of the RQ */
1801 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1802 cqe = *pcqe;
1803 while (*budget) {
1804 sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
1805 if (sw_cons == sw_prod)
1806 break;
1807 memset(cqe, 0, sizeof(*cqe));
1808 cqe->status =
1809 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
1810 cqe->opcode = opcode;
1811 cqe->qp_handle = (unsigned long)qp;
1812 cqe->wr_id = rq->swq[sw_cons].wr_id;
1813 cqe++;
1814 (*budget)--;
1815 rq->hwq.cons++;
1816 }
1817 *pcqe = cqe;
1818 if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
1819 /* Out of budget */
1820 rc = -EAGAIN;
1821
1822 return rc;
1823}
1824
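/* Move the QP to the error state so no new WQEs are posted, cancel phantom
 * WQE processing and add the QP to its CQs' flush lists.
 */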
Selvin Xavierf218d672017-06-29 12:28:15 -07001825void bnxt_qplib_mark_qp_error(void *qp_handle)
1826{
1827 struct bnxt_qplib_qp *qp = qp_handle;
1828
1829 if (!qp)
1830 return;
1831
1832 /* Must block new posting of SQ and RQ */
1833 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
1834 bnxt_qplib_cancel_phantom_processing(qp);
1835
1836 /* Add qp to flush list of the CQ */
1837 __bnxt_qplib_add_flush_qp(qp);
1838}
1839
Eddie Wai9152e0b2017-06-14 03:26:23 -07001840/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
1841 * CQE is tracked from sw_cq_cons to max_elements but is valid only if VALID=1
1842 */
1843static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
1844 u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
1845{
1846 struct bnxt_qplib_q *sq = &qp->sq;
1847 struct bnxt_qplib_swq *swq;
1848 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
1849 struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
1850 struct cq_req *peek_req_hwcqe;
1851 struct bnxt_qplib_qp *peek_qp;
1852 struct bnxt_qplib_q *peek_sq;
1853 int i, rc = 0;
1854
1855 /* Normal mode */
1856 /* Check for the psn_search marking before completing */
1857 swq = &sq->swq[sw_sq_cons];
1858 if (swq->psn_search &&
1859 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
1860 /* Unmark */
1861 swq->psn_search->flags_next_psn = cpu_to_le32
1862 (le32_to_cpu(swq->psn_search->flags_next_psn)
1863 & ~0x80000000);
1864 dev_dbg(&cq->hwq.pdev->dev,
1865 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
1866 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1867 sq->condition = true;
1868 sq->send_phantom = true;
1869
1870 /* TODO: Only ARM if the previous SQE is ARMALL */
1871 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
1872
1873 rc = -EAGAIN;
1874 goto out;
1875 }
1876 if (sq->condition) {
1877 /* Peek at the completions */
1878 peek_raw_cq_cons = cq->hwq.cons;
1879 peek_sw_cq_cons = cq_cons;
1880 i = cq->hwq.max_elements;
1881 while (i--) {
1882 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
1883 peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
1884 peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
1885 [CQE_IDX(peek_sw_cq_cons)];
1886 /* If the next hwcqe is VALID */
1887 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
1888 cq->hwq.max_elements)) {
1889 /* If the next hwcqe is a REQ */
1890 if ((peek_hwcqe->cqe_type_toggle &
1891 CQ_BASE_CQE_TYPE_MASK) ==
1892 CQ_BASE_CQE_TYPE_REQ) {
1893 peek_req_hwcqe = (struct cq_req *)
1894 peek_hwcqe;
1895 peek_qp = (struct bnxt_qplib_qp *)
1896 ((unsigned long)
1897 le64_to_cpu
1898 (peek_req_hwcqe->qp_handle));
1899 peek_sq = &peek_qp->sq;
1900				peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
1901					peek_req_hwcqe->sq_cons_idx) - 1,
1902					&sq->hwq);
1903 /* If the hwcqe's sq's wr_id matches */
1904 if (peek_sq == sq &&
1905 sq->swq[peek_sq_cons_idx].wr_id ==
1906 BNXT_QPLIB_FENCE_WRID) {
1907 /*
1908 * Unbreak only if the phantom
1909 * comes back
1910 */
1911 dev_dbg(&cq->hwq.pdev->dev,
1912					"FP: Got Phantom CQE");
1913 sq->condition = false;
1914 sq->single = true;
1915 rc = 0;
1916 goto out;
1917 }
1918 }
1919 /* Valid but not the phantom, so keep looping */
1920 } else {
1921 /* Not valid yet, just exit and wait */
1922 rc = -EINVAL;
1923 goto out;
1924 }
1925 peek_sw_cq_cons++;
1926 peek_raw_cq_cons++;
1927 }
1928 dev_err(&cq->hwq.pdev->dev,
1929 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
1930 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1931 rc = -EINVAL;
1932 }
1933out:
1934 return rc;
1935}
1936
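/* Process a REQ (send) completion.  The HW may coalesce completions, so
 * fabricate one CQE per signaled SWQE from the current sq cons up to the
 * consumer index reported by the hardware CQE; WA 9060 is applied before
 * completing a signaled WQE.
 */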
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001937static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
1938 struct cq_req *hwcqe,
Eddie Wai9152e0b2017-06-14 03:26:23 -07001939 struct bnxt_qplib_cqe **pcqe, int *budget,
1940 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001941{
1942 struct bnxt_qplib_qp *qp;
1943 struct bnxt_qplib_q *sq;
1944 struct bnxt_qplib_cqe *cqe;
Eddie Wai9152e0b2017-06-14 03:26:23 -07001945 u32 sw_sq_cons, cqe_sq_cons;
1946 struct bnxt_qplib_swq *swq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001947 int rc = 0;
1948
1949 qp = (struct bnxt_qplib_qp *)((unsigned long)
1950 le64_to_cpu(hwcqe->qp_handle));
1951 if (!qp) {
1952 dev_err(&cq->hwq.pdev->dev,
1953 "QPLIB: FP: Process Req qp is NULL");
1954 return -EINVAL;
1955 }
1956 sq = &qp->sq;
1957
Eddie Wai9152e0b2017-06-14 03:26:23 -07001958 cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
1959 if (cqe_sq_cons > sq->hwq.max_elements) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001960 dev_err(&cq->hwq.pdev->dev,
1961 "QPLIB: FP: CQ Process req reported ");
1962 dev_err(&cq->hwq.pdev->dev,
1963 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
Eddie Wai9152e0b2017-06-14 03:26:23 -07001964 cqe_sq_cons, sq->hwq.max_elements);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001965 return -EINVAL;
1966 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001967
Selvin Xavierf218d672017-06-29 12:28:15 -07001968 if (qp->sq.flushed) {
1969 dev_dbg(&cq->hwq.pdev->dev,
1970 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
1971 goto done;
1972 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001973	/* We must walk the sq's swq to fabricate CQEs for all previously
1974	 * signaled SWQEs, since the HW may coalesce completions, from the
Eddie Wai9152e0b2017-06-14 03:26:23 -07001975	 * current sq cons up to the cqe_sq_cons
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001976 */
1977 cqe = *pcqe;
1978 while (*budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07001979 sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
1980 if (sw_sq_cons == cqe_sq_cons)
1981 /* Done */
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001982 break;
Eddie Wai9152e0b2017-06-14 03:26:23 -07001983
1984 swq = &sq->swq[sw_sq_cons];
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001985 memset(cqe, 0, sizeof(*cqe));
1986 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
1987 cqe->qp_handle = (u64)(unsigned long)qp;
1988 cqe->src_qp = qp->id;
Eddie Wai9152e0b2017-06-14 03:26:23 -07001989 cqe->wr_id = swq->wr_id;
1990 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
1991 goto skip;
1992 cqe->type = swq->type;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001993
1994 /* For the last CQE, check for status. For errors, regardless
1995 * of the request being signaled or not, it must complete with
1996 * the hwcqe error status
1997 */
Eddie Wai9152e0b2017-06-14 03:26:23 -07001998 if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001999 hwcqe->status != CQ_REQ_STATUS_OK) {
2000 cqe->status = hwcqe->status;
2001 dev_err(&cq->hwq.pdev->dev,
2002 "QPLIB: FP: CQ Processed Req ");
2003 dev_err(&cq->hwq.pdev->dev,
2004 "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
Eddie Wai9152e0b2017-06-14 03:26:23 -07002005 sw_sq_cons, cqe->wr_id, cqe->status);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002006 cqe++;
2007 (*budget)--;
Selvin Xavierf218d672017-06-29 12:28:15 -07002008 bnxt_qplib_lock_buddy_cq(qp, cq);
2009 bnxt_qplib_mark_qp_error(qp);
2010 bnxt_qplib_unlock_buddy_cq(qp, cq);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002011 } else {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002012 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2013 /* Before we complete, do WA 9060 */
2014 if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2015 cqe_sq_cons)) {
2016 *lib_qp = qp;
2017 goto out;
2018 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002019 cqe->status = CQ_REQ_STATUS_OK;
2020 cqe++;
2021 (*budget)--;
2022 }
2023 }
Eddie Wai9152e0b2017-06-14 03:26:23 -07002024skip:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002025 sq->hwq.cons++;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002026 if (sq->single)
2027 break;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002028 }
Eddie Wai9152e0b2017-06-14 03:26:23 -07002029out:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002030 *pcqe = cqe;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002031 if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002032 /* Out of budget */
2033 rc = -EAGAIN;
2034 goto done;
2035 }
Eddie Wai9152e0b2017-06-14 03:26:23 -07002036 /*
2037 * Back to normal completion mode only after it has completed all of
2038 * the WC for this CQE
2039 */
2040 sq->single = false;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002041done:
2042 return rc;
2043}
2044
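/* Process an RC receive completion: translate the hardware CQE into a
 * qplib CQE and, on an error status, add the QP to the CQ flush list.
 */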
2045static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2046 struct cq_res_rc *hwcqe,
2047 struct bnxt_qplib_cqe **pcqe,
2048 int *budget)
2049{
2050 struct bnxt_qplib_qp *qp;
2051 struct bnxt_qplib_q *rq;
2052 struct bnxt_qplib_cqe *cqe;
2053 u32 wr_id_idx;
2054 int rc = 0;
2055
2056 qp = (struct bnxt_qplib_qp *)((unsigned long)
2057 le64_to_cpu(hwcqe->qp_handle));
2058 if (!qp) {
2059 dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
2060 return -EINVAL;
2061 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002062 if (qp->rq.flushed) {
2063 dev_dbg(&cq->hwq.pdev->dev,
2064 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2065 goto done;
2066 }
2067
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002068 cqe = *pcqe;
2069 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2070 cqe->length = le32_to_cpu(hwcqe->length);
2071 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2072 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2073 cqe->flags = le16_to_cpu(hwcqe->flags);
2074 cqe->status = hwcqe->status;
2075 cqe->qp_handle = (u64)(unsigned long)qp;
2076
2077 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2078 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2079 rq = &qp->rq;
2080 if (wr_id_idx > rq->hwq.max_elements) {
2081 dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
2082 dev_err(&cq->hwq.pdev->dev,
2083 "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
2084 wr_id_idx, rq->hwq.max_elements);
2085 return -EINVAL;
2086 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002087
2088 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2089 cqe++;
2090 (*budget)--;
2091 rq->hwq.cons++;
2092 *pcqe = cqe;
2093
2094 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
Selvin Xavierf218d672017-06-29 12:28:15 -07002095 /* Add qp to flush list of the CQ */
2096 bnxt_qplib_lock_buddy_cq(qp, cq);
2097 __bnxt_qplib_add_flush_qp(qp);
2098 bnxt_qplib_unlock_buddy_cq(qp, cq);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002099 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002100
2101done:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002102 return rc;
2103}
2104
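/* Process a UD receive completion, including the source QP and source MAC;
 * on an error status the QP is added to the CQ flush list.
 */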
2105static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2106 struct cq_res_ud *hwcqe,
2107 struct bnxt_qplib_cqe **pcqe,
2108 int *budget)
2109{
2110 struct bnxt_qplib_qp *qp;
2111 struct bnxt_qplib_q *rq;
2112 struct bnxt_qplib_cqe *cqe;
2113 u32 wr_id_idx;
2114 int rc = 0;
2115
2116 qp = (struct bnxt_qplib_qp *)((unsigned long)
2117 le64_to_cpu(hwcqe->qp_handle));
2118 if (!qp) {
2119 dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
2120 return -EINVAL;
2121 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002122 if (qp->rq.flushed) {
2123 dev_dbg(&cq->hwq.pdev->dev,
2124 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2125 goto done;
2126 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002127 cqe = *pcqe;
2128 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2129 cqe->length = le32_to_cpu(hwcqe->length);
2130 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2131 cqe->flags = le16_to_cpu(hwcqe->flags);
2132 cqe->status = hwcqe->status;
2133 cqe->qp_handle = (u64)(unsigned long)qp;
2134 memcpy(cqe->smac, hwcqe->src_mac, 6);
2135 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2136 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2137 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2138 ((le32_to_cpu(
2139 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2140 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2141
2142 rq = &qp->rq;
2143 if (wr_id_idx > rq->hwq.max_elements) {
2144 dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
2145 dev_err(&cq->hwq.pdev->dev,
2146 "QPLIB: wr_id idx %#x exceeded RQ max %#x",
2147 wr_id_idx, rq->hwq.max_elements);
2148 return -EINVAL;
2149 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002150
2151 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2152 cqe++;
2153 (*budget)--;
2154 rq->hwq.cons++;
2155 *pcqe = cqe;
2156
2157 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
Selvin Xavierf218d672017-06-29 12:28:15 -07002158 /* Add qp to flush list of the CQ */
2159 bnxt_qplib_lock_buddy_cq(qp, cq);
2160 __bnxt_qplib_add_flush_qp(qp);
2161 bnxt_qplib_unlock_buddy_cq(qp, cq);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002162 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002163done:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002164 return rc;
2165}
2166
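/* Return true when no valid CQE is pending at the current consumer index. */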
Selvin Xavier499e4562017-06-29 12:28:18 -07002167bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2168{
2169 struct cq_base *hw_cqe, **hw_cqe_ptr;
2170 unsigned long flags;
2171 u32 sw_cons, raw_cons;
2172 bool rc = true;
2173
2174 spin_lock_irqsave(&cq->hwq.lock, flags);
2175 raw_cons = cq->hwq.cons;
2176 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2177 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2178 hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2179
2180 /* Check for Valid bit. If the CQE is valid, return false */
2181 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2182 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2183 return rc;
2184}
2185
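/* Process a raw-Ethernet/QP1 receive completion; the raweth flags are
 * passed up in the CQE for the caller to parse the received headers.
 */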
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002186static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2187 struct cq_res_raweth_qp1 *hwcqe,
2188 struct bnxt_qplib_cqe **pcqe,
2189 int *budget)
2190{
2191 struct bnxt_qplib_qp *qp;
2192 struct bnxt_qplib_q *rq;
2193 struct bnxt_qplib_cqe *cqe;
2194 u32 wr_id_idx;
2195 int rc = 0;
2196
2197 qp = (struct bnxt_qplib_qp *)((unsigned long)
2198 le64_to_cpu(hwcqe->qp_handle));
2199 if (!qp) {
2200 dev_err(&cq->hwq.pdev->dev,
2201 "QPLIB: process_cq Raw/QP1 qp is NULL");
2202 return -EINVAL;
2203 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002204 if (qp->rq.flushed) {
2205 dev_dbg(&cq->hwq.pdev->dev,
2206 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2207 goto done;
2208 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002209 cqe = *pcqe;
2210 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2211 cqe->flags = le16_to_cpu(hwcqe->flags);
2212 cqe->qp_handle = (u64)(unsigned long)qp;
2213
2214 wr_id_idx =
2215 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2216 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2217 cqe->src_qp = qp->id;
2218 if (qp->id == 1 && !cqe->length) {
2219 /* Add workaround for the length misdetection */
2220 cqe->length = 296;
2221 } else {
2222 cqe->length = le16_to_cpu(hwcqe->length);
2223 }
2224 cqe->pkey_index = qp->pkey_index;
2225 memcpy(cqe->smac, qp->smac, 6);
2226
2227 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2228 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2229
2230 rq = &qp->rq;
2231 if (wr_id_idx > rq->hwq.max_elements) {
2232 dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
2233 dev_err(&cq->hwq.pdev->dev, "QPLIB: ix 0x%x exceeded RQ max 0x%x",
2234 wr_id_idx, rq->hwq.max_elements);
2235 return -EINVAL;
2236 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002237
2238 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2239 cqe++;
2240 (*budget)--;
2241 rq->hwq.cons++;
2242 *pcqe = cqe;
2243
2244 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
Selvin Xavierf218d672017-06-29 12:28:15 -07002245 /* Add qp to flush list of the CQ */
2246 bnxt_qplib_lock_buddy_cq(qp, cq);
2247 __bnxt_qplib_add_flush_qp(qp);
2248 bnxt_qplib_unlock_buddy_cq(qp, cq);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002249 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002250
2251done:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002252 return rc;
2253}
2254
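/* Process a TERMINAL CQE: put the QP in the error state, complete the
 * remaining signaled SQEs up to the reported sq consumer index and add the
 * QP to the flush lists so the RQ gets flushed.
 */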
2255static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2256 struct cq_terminal *hwcqe,
2257 struct bnxt_qplib_cqe **pcqe,
2258 int *budget)
2259{
2260 struct bnxt_qplib_qp *qp;
2261 struct bnxt_qplib_q *sq, *rq;
2262 struct bnxt_qplib_cqe *cqe;
2263 u32 sw_cons = 0, cqe_cons;
2264 int rc = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002265
2266 /* Check the Status */
2267 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2268 dev_warn(&cq->hwq.pdev->dev,
2269 "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
2270 hwcqe->status);
2271
2272 qp = (struct bnxt_qplib_qp *)((unsigned long)
2273 le64_to_cpu(hwcqe->qp_handle));
2274 if (!qp) {
2275 dev_err(&cq->hwq.pdev->dev,
2276 "QPLIB: FP: CQ Process terminal qp is NULL");
2277 return -EINVAL;
2278 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002279
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002280 /* Must block new posting of SQ and RQ */
2281 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2282
2283 sq = &qp->sq;
2284 rq = &qp->rq;
2285
2286 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2287 if (cqe_cons == 0xFFFF)
2288 goto do_rq;
2289
2290 if (cqe_cons > sq->hwq.max_elements) {
2291 dev_err(&cq->hwq.pdev->dev,
2292 "QPLIB: FP: CQ Process terminal reported ");
2293 dev_err(&cq->hwq.pdev->dev,
2294 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2295 cqe_cons, sq->hwq.max_elements);
2296 goto do_rq;
2297 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002298
2299 if (qp->sq.flushed) {
2300 dev_dbg(&cq->hwq.pdev->dev,
2301 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2302 goto sq_done;
2303 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002304
2305 /* Terminal CQE can also include aggregated successful CQEs prior.
2306 * So we must complete all CQEs from the current sq's cons to the
2307 * cq_cons with status OK
2308 */
2309 cqe = *pcqe;
2310 while (*budget) {
2311 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2312 if (sw_cons == cqe_cons)
2313 break;
2314 if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2315 memset(cqe, 0, sizeof(*cqe));
2316 cqe->status = CQ_REQ_STATUS_OK;
2317 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2318 cqe->qp_handle = (u64)(unsigned long)qp;
2319 cqe->src_qp = qp->id;
2320 cqe->wr_id = sq->swq[sw_cons].wr_id;
2321 cqe->type = sq->swq[sw_cons].type;
2322 cqe++;
2323 (*budget)--;
2324 }
2325 sq->hwq.cons++;
2326 }
2327 *pcqe = cqe;
2328 if (!(*budget) && sw_cons != cqe_cons) {
2329 /* Out of budget */
2330 rc = -EAGAIN;
2331 goto sq_done;
2332 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002333sq_done:
2334 if (rc)
2335 return rc;
2336do_rq:
2337 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2338 if (cqe_cons == 0xFFFF) {
2339 goto done;
2340 } else if (cqe_cons > rq->hwq.max_elements) {
2341 dev_err(&cq->hwq.pdev->dev,
2342 "QPLIB: FP: CQ Processed terminal ");
2343 dev_err(&cq->hwq.pdev->dev,
2344 "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
2345 cqe_cons, rq->hwq.max_elements);
2346 goto done;
2347 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002348
2349 if (qp->rq.flushed) {
2350 dev_dbg(&cq->hwq.pdev->dev,
2351 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2352 rc = 0;
2353 goto done;
2354 }
2355
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002356 /* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2357	 * from the current rq->cons to the rq->prod, regardless of the
2358	 * rq->cons value the terminal CQE indicates
2359 */
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002360
Selvin Xavierf218d672017-06-29 12:28:15 -07002361 /* Add qp to flush list of the CQ */
2362 bnxt_qplib_lock_buddy_cq(qp, cq);
2363 __bnxt_qplib_add_flush_qp(qp);
2364 bnxt_qplib_unlock_buddy_cq(qp, cq);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002365done:
2366 return rc;
2367}
2368
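/* A CUT_OFF CQE indicates that the CQ resize operation has completed;
 * clear the resize-in-progress flag and wake up any waiter on cq->waitq.
 */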
2369static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2370 struct cq_cutoff *hwcqe)
2371{
2372 /* Check the Status */
2373 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2374 dev_err(&cq->hwq.pdev->dev,
2375 "QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
2376 hwcqe->status);
2377 return -EINVAL;
2378 }
2379 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2380 wake_up_interruptible(&cq->waitq);
2381
2382 return 0;
2383}
2384
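/* Walk the SQ and RQ flush lists of this CQ and generate FLUSHED_ERR CQEs
 * into the caller's buffer; returns the number of CQEs produced.
 */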
Selvin Xavierf218d672017-06-29 12:28:15 -07002385int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2386 struct bnxt_qplib_cqe *cqe,
2387 int num_cqes)
2388{
2389 struct bnxt_qplib_qp *qp = NULL;
2390 u32 budget = num_cqes;
2391 unsigned long flags;
2392
2393 spin_lock_irqsave(&cq->hwq.lock, flags);
2394 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2395 dev_dbg(&cq->hwq.pdev->dev,
2396 "QPLIB: FP: Flushing SQ QP= %p",
2397 qp);
2398 __flush_sq(&qp->sq, qp, &cqe, &budget);
2399 }
2400
2401 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2402 dev_dbg(&cq->hwq.pdev->dev,
2403 "QPLIB: FP: Flushing RQ QP= %p",
2404 qp);
2405 __flush_rq(&qp->rq, qp, &cqe, &budget);
2406 }
2407 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2408
2409 return num_cqes - budget;
2410}
2411
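/* Poll up to num_cqes completions from the CQ into cqe[] and update the CQ
 * consumer-index doorbell when any entries were consumed.  Returns the
 * number of CQEs written; *lib_qp is set when WA 9060 needs the caller's
 * attention for that QP.  A typical consumer (illustrative only) drains the
 * CQ in a loop:
 *
 *	do {
 *		polled = bnxt_qplib_poll_cq(cq, cqes, ncqe, &lib_qp);
 *		... translate cqes[0..polled) into ib_wc entries ...
 *	} while (polled == ncqe);
 */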
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002412int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
Eddie Wai9152e0b2017-06-14 03:26:23 -07002413 int num_cqes, struct bnxt_qplib_qp **lib_qp)
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002414{
2415 struct cq_base *hw_cqe, **hw_cqe_ptr;
2416 unsigned long flags;
2417 u32 sw_cons, raw_cons;
2418 int budget, rc = 0;
2419
2420 spin_lock_irqsave(&cq->hwq.lock, flags);
2421 raw_cons = cq->hwq.cons;
2422 budget = num_cqes;
2423
2424 while (budget) {
2425 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2426 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2427 hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2428
2429 /* Check for Valid bit */
2430 if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2431 break;
2432
2433		/* Translate the device's CQE format into the qplib CQE format */
2434 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2435 case CQ_BASE_CQE_TYPE_REQ:
2436 rc = bnxt_qplib_cq_process_req(cq,
2437 (struct cq_req *)hw_cqe,
Eddie Wai9152e0b2017-06-14 03:26:23 -07002438 &cqe, &budget,
2439 sw_cons, lib_qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002440 break;
2441 case CQ_BASE_CQE_TYPE_RES_RC:
2442 rc = bnxt_qplib_cq_process_res_rc(cq,
2443 (struct cq_res_rc *)
2444 hw_cqe, &cqe,
2445 &budget);
2446 break;
2447 case CQ_BASE_CQE_TYPE_RES_UD:
2448 rc = bnxt_qplib_cq_process_res_ud
2449 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
2450 &budget);
2451 break;
2452 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2453 rc = bnxt_qplib_cq_process_res_raweth_qp1
2454 (cq, (struct cq_res_raweth_qp1 *)
2455 hw_cqe, &cqe, &budget);
2456 break;
2457 case CQ_BASE_CQE_TYPE_TERMINAL:
2458 rc = bnxt_qplib_cq_process_terminal
2459 (cq, (struct cq_terminal *)hw_cqe,
2460 &cqe, &budget);
2461 break;
2462 case CQ_BASE_CQE_TYPE_CUT_OFF:
2463 bnxt_qplib_cq_process_cutoff
2464 (cq, (struct cq_cutoff *)hw_cqe);
2465 /* Done processing this CQ */
2466 goto exit;
2467 default:
2468 dev_err(&cq->hwq.pdev->dev,
2469 "QPLIB: process_cq unknown type 0x%lx",
2470 hw_cqe->cqe_type_toggle &
2471 CQ_BASE_CQE_TYPE_MASK);
2472 rc = -EINVAL;
2473 break;
2474 }
2475 if (rc < 0) {
2476 if (rc == -EAGAIN)
2477 break;
2478 /* Error while processing the CQE, just skip to the
2479 * next one
2480 */
2481 dev_err(&cq->hwq.pdev->dev,
2482 "QPLIB: process_cqe error rc = 0x%x", rc);
2483 }
2484 raw_cons++;
2485 }
2486 if (cq->hwq.cons != raw_cons) {
2487 cq->hwq.cons = raw_cons;
2488 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2489 }
2490exit:
2491 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2492 return num_cqes - budget;
2493}
2494
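/* Arm the CQ with the requested notification type and record the armed
 * state so the NQ handler knows whether to invoke the completion handler.
 */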
2495void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2496{
2497 unsigned long flags;
2498
2499 spin_lock_irqsave(&cq->hwq.lock, flags);
2500 if (arm_type)
2501 bnxt_qplib_arm_cq(cq, arm_type);
Selvin Xavierf218d672017-06-29 12:28:15 -07002502	/* Use cq->arm_state to track whether the CQ completion handler should run */
2503 atomic_set(&cq->arm_state, 1);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002504 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2505}