 1/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: Fast Path Operators
37 */
38
39#include <linux/interrupt.h>
40#include <linux/spinlock.h>
41#include <linux/sched.h>
42#include <linux/slab.h>
43#include <linux/pci.h>
44#include <linux/prefetch.h>
45
46#include "roce_hsi.h"
47
48#include "qplib_res.h"
49#include "qplib_rcfw.h"
50#include "qplib_sp.h"
51#include "qplib_fp.h"
52
53static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
 54static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
55
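/* Reset the SQ's phantom-WQE tracking state. Called when a QP is being
 * moved to the flush list so that any in-progress phantom-CQE search on
 * its send queue is abandoned.
 */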
56static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
57{
58 qp->sq.condition = false;
59 qp->sq.send_phantom = false;
60 qp->sq.single = false;
61}
62
63/* Flush list */
64static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
65{
66 struct bnxt_qplib_cq *scq, *rcq;
67
68 scq = qp->scq;
69 rcq = qp->rcq;
70
71 if (!qp->sq.flushed) {
72 dev_dbg(&scq->hwq.pdev->dev,
73 "QPLIB: FP: Adding to SQ Flush list = %p",
74 qp);
75 bnxt_qplib_cancel_phantom_processing(qp);
76 list_add_tail(&qp->sq_flush, &scq->sqf_head);
77 qp->sq.flushed = true;
78 }
79 if (!qp->srq) {
80 if (!qp->rq.flushed) {
81 dev_dbg(&rcq->hwq.pdev->dev,
82 "QPLIB: FP: Adding to RQ Flush list = %p",
83 qp);
84 list_add_tail(&qp->rq_flush, &rcq->rqf_head);
85 qp->rq.flushed = true;
86 }
87 }
88}
89
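/* Take both of a QP's CQ locks: the SCQ lock is taken with interrupts
 * saved, then the RCQ lock. When send and receive map to the same CQ only
 * one lock is really acquired; the __acquire()/__release() annotations
 * keep sparse's lock-balance checking happy.
 */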
90void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
91 unsigned long *flags)
92 __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
93{
94 spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
95 if (qp->scq == qp->rcq)
96 __acquire(&qp->rcq->hwq.lock);
97 else
98 spin_lock(&qp->rcq->hwq.lock);
99}
100
101void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
102 unsigned long *flags)
103 __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
104{
105 if (qp->scq == qp->rcq)
106 __release(&qp->rcq->hwq.lock);
107 else
108 spin_unlock(&qp->rcq->hwq.lock);
109 spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
110}
111
112static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
113 struct bnxt_qplib_cq *cq)
114{
115 struct bnxt_qplib_cq *buddy_cq = NULL;
116
117 if (qp->scq == qp->rcq)
118 buddy_cq = NULL;
119 else if (qp->scq == cq)
120 buddy_cq = qp->rcq;
121 else
122 buddy_cq = qp->scq;
123 return buddy_cq;
124}
125
126static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
127 struct bnxt_qplib_cq *cq)
128 __acquires(&buddy_cq->hwq.lock)
129{
130 struct bnxt_qplib_cq *buddy_cq = NULL;
131
132 buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
133 if (!buddy_cq)
134 __acquire(&cq->hwq.lock);
135 else
136 spin_lock(&buddy_cq->hwq.lock);
137}
138
139static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
140 struct bnxt_qplib_cq *cq)
141 __releases(&buddy_cq->hwq.lock)
142{
143 struct bnxt_qplib_cq *buddy_cq = NULL;
144
145 buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
146 if (!buddy_cq)
147 __release(&cq->hwq.lock);
148 else
149 spin_unlock(&buddy_cq->hwq.lock);
150}
151
152void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
153{
154 unsigned long flags;
155
156 bnxt_qplib_acquire_cq_locks(qp, &flags);
157 __bnxt_qplib_add_flush_qp(qp);
158 bnxt_qplib_release_cq_locks(qp, &flags);
159}
160
161static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
162{
 163	if (qp->sq.flushed) {
164 qp->sq.flushed = false;
165 list_del(&qp->sq_flush);
166 }
167 if (!qp->srq) {
168 if (qp->rq.flushed) {
169 qp->rq.flushed = false;
170 list_del(&qp->rq_flush);
171 }
172 }
173}
174
175void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
176{
177 unsigned long flags;
178
179 bnxt_qplib_acquire_cq_locks(qp, &flags);
180 __clean_cq(qp->scq, (u64)(unsigned long)qp);
181 qp->sq.hwq.prod = 0;
182 qp->sq.hwq.cons = 0;
183 __clean_cq(qp->rcq, (u64)(unsigned long)qp);
184 qp->rq.hwq.prod = 0;
185 qp->rq.hwq.cons = 0;
186
187 __bnxt_qplib_del_flush_qp(qp);
188 bnxt_qplib_release_cq_locks(qp, &flags);
189}
190
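/* Work item queued on the NQ workqueue: if the CQ is still armed, invoke
 * the registered CQN handler so the consumer polls the CQ. Used to deliver
 * completion notifications for requests that were posted while the QP was
 * already in the error state.
 */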
191static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
192{
193 struct bnxt_qplib_nq_work *nq_work =
194 container_of(work, struct bnxt_qplib_nq_work, work);
195
196 struct bnxt_qplib_cq *cq = nq_work->cq;
197 struct bnxt_qplib_nq *nq = nq_work->nq;
198
199 if (cq && nq) {
200 spin_lock_bh(&cq->compl_lock);
201 if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
202 dev_dbg(&nq->pdev->dev,
203 "%s:Trigger cq = %p event nq = %p\n",
204 __func__, cq, nq);
205 nq->cqn_handler(nq, cq);
206 }
207 spin_unlock_bh(&cq->compl_lock);
208 }
209 kfree(nq_work);
210}
 211
212static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
213 struct bnxt_qplib_qp *qp)
214{
215 struct bnxt_qplib_q *rq = &qp->rq;
216 struct bnxt_qplib_q *sq = &qp->sq;
217
218 if (qp->rq_hdr_buf)
219 dma_free_coherent(&res->pdev->dev,
220 rq->hwq.max_elements * qp->rq_hdr_buf_size,
221 qp->rq_hdr_buf, qp->rq_hdr_buf_map);
222 if (qp->sq_hdr_buf)
223 dma_free_coherent(&res->pdev->dev,
224 sq->hwq.max_elements * qp->sq_hdr_buf_size,
225 qp->sq_hdr_buf, qp->sq_hdr_buf_map);
226 qp->rq_hdr_buf = NULL;
227 qp->sq_hdr_buf = NULL;
228 qp->rq_hdr_buf_map = 0;
229 qp->sq_hdr_buf_map = 0;
230 qp->sq_hdr_buf_size = 0;
231 qp->rq_hdr_buf_size = 0;
232}
233
234static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
235 struct bnxt_qplib_qp *qp)
236{
237 struct bnxt_qplib_q *rq = &qp->rq;
 238	struct bnxt_qplib_q *sq = &qp->sq;
239 int rc = 0;
240
241 if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
242 qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
243 sq->hwq.max_elements *
244 qp->sq_hdr_buf_size,
245 &qp->sq_hdr_buf_map, GFP_KERNEL);
246 if (!qp->sq_hdr_buf) {
247 rc = -ENOMEM;
248 dev_err(&res->pdev->dev,
249 "QPLIB: Failed to create sq_hdr_buf");
250 goto fail;
251 }
252 }
253
254 if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
255 qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
256 rq->hwq.max_elements *
257 qp->rq_hdr_buf_size,
258 &qp->rq_hdr_buf_map,
259 GFP_KERNEL);
260 if (!qp->rq_hdr_buf) {
261 rc = -ENOMEM;
262 dev_err(&res->pdev->dev,
263 "QPLIB: Failed to create rq_hdr_buf");
264 goto fail;
265 }
266 }
267 return 0;
268
269fail:
270 bnxt_qplib_free_qp_hdr_buf(res, qp);
271 return rc;
272}
273
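/* Tasklet body: drain up to nq->budget entries from the notification
 * queue. CQ notification entries re-enable CQ arming and hand the CQ to
 * the registered CQN handler; unrecognised entries are logged and skipped.
 * Finally the NQ consumer index is written back and the NQ is re-armed.
 */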
274static void bnxt_qplib_service_nq(unsigned long data)
275{
276 struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
277 struct bnxt_qplib_hwq *hwq = &nq->hwq;
278 struct nq_base *nqe, **nq_ptr;
 279	struct bnxt_qplib_cq *cq;
 280	int num_cqne_processed = 0;
281 u32 sw_cons, raw_cons;
282 u16 type;
283 int budget = nq->budget;
284 u64 q_handle;
285
286 /* Service the NQ until empty */
287 raw_cons = hwq->cons;
288 while (budget--) {
289 sw_cons = HWQ_CMP(raw_cons, hwq);
290 nq_ptr = (struct nq_base **)hwq->pbl_ptr;
291 nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
292 if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
293 break;
294
 295		/*
 296		 * The valid bit of the entry must be checked before reading
 297		 * any further fields.
298 */
299 dma_rmb();
300
 301		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
302 switch (type) {
303 case NQ_BASE_TYPE_CQ_NOTIFICATION:
304 {
305 struct nq_cn *nqcne = (struct nq_cn *)nqe;
306
307 q_handle = le32_to_cpu(nqcne->cq_handle_low);
308 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
309 << 32;
 310			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
311 bnxt_qplib_arm_cq_enable(cq);
312 spin_lock_bh(&cq->compl_lock);
313 atomic_set(&cq->arm_state, 0);
314 if (!nq->cqn_handler(nq, (cq)))
 315				num_cqne_processed++;
316 else
317 dev_warn(&nq->pdev->dev,
318 "QPLIB: cqn - type 0x%x not handled",
319 type);
 320			spin_unlock_bh(&cq->compl_lock);
 321			break;
322 }
323 case NQ_BASE_TYPE_DBQ_EVENT:
324 break;
325 default:
326 dev_warn(&nq->pdev->dev,
327 "QPLIB: nqe with type = 0x%x not handled",
328 type);
329 break;
330 }
331 raw_cons++;
332 }
333 if (hwq->cons != raw_cons) {
334 hwq->cons = raw_cons;
335 NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
336 }
337}
338
339static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
340{
341 struct bnxt_qplib_nq *nq = dev_instance;
342 struct bnxt_qplib_hwq *hwq = &nq->hwq;
343 struct nq_base **nq_ptr;
344 u32 sw_cons;
345
346 /* Prefetch the NQ element */
347 sw_cons = HWQ_CMP(hwq->cons, hwq);
348 nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
349 prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
350
351 /* Fan out to CPU affinitized kthreads? */
352 tasklet_schedule(&nq->worker);
353
354 return IRQ_HANDLED;
355}
356
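/* Tear down a notification queue: flush the CQN workqueue, quiesce the
 * service tasklet, release the MSI-X vector and unmap the NQ doorbell
 * register.
 */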
357void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
358{
 359	if (nq->cqn_wq) {
360 destroy_workqueue(nq->cqn_wq);
361 nq->cqn_wq = NULL;
362 }
 363	/* Make sure the HW is stopped! */
364 synchronize_irq(nq->vector);
365 tasklet_disable(&nq->worker);
366 tasklet_kill(&nq->worker);
367
368 if (nq->requested) {
 369		irq_set_affinity_hint(nq->vector, NULL);
 370		free_irq(nq->vector, nq);
371 nq->requested = false;
372 }
373 if (nq->bar_reg_iomem)
374 iounmap(nq->bar_reg_iomem);
375 nq->bar_reg_iomem = NULL;
376
377 nq->cqn_handler = NULL;
378 nq->srqn_handler = NULL;
379 nq->vector = 0;
380}
381
382int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 383			 int nq_idx, int msix_vector, int bar_reg_offset,
 384			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
385 struct bnxt_qplib_cq *),
386 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
387 void *, u8 event))
388{
389 resource_size_t nq_base;
 390	int rc = -1;
 391
392 nq->pdev = pdev;
393 nq->vector = msix_vector;
394
395 nq->cqn_handler = cqn_handler;
396
397 nq->srqn_handler = srqn_handler;
398
399 tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
400
 401	/* Have a task to schedule CQ notifiers in post send case */
402 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
403 if (!nq->cqn_wq)
404 goto fail;
405
 406	nq->requested = false;
 407	memset(nq->name, 0, 32);
408 sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
409 rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
 410	if (rc) {
411 dev_err(&nq->pdev->dev,
412 "Failed to request IRQ for NQ: %#x", rc);
 413		goto fail;
414 }
 415
416 cpumask_clear(&nq->mask);
417 cpumask_set_cpu(nq_idx, &nq->mask);
418 rc = irq_set_affinity_hint(nq->vector, &nq->mask);
419 if (rc) {
420 dev_warn(&nq->pdev->dev,
421 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
422 nq->vector, nq_idx);
423 }
424
 425	nq->requested = true;
426 nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
427 nq->bar_reg_off = bar_reg_offset;
428 nq_base = pci_resource_start(pdev, nq->bar_reg);
429 if (!nq_base) {
430 rc = -ENOMEM;
431 goto fail;
432 }
433 nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
434 if (!nq->bar_reg_iomem) {
435 rc = -ENOMEM;
436 goto fail;
437 }
438 NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
439
440 return 0;
441fail:
442 bnxt_qplib_disable_nq(nq);
443 return rc;
444}
445
446void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
447{
 448	if (nq->hwq.max_elements) {
 449		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
 450		nq->hwq.max_elements = 0;
451 }
 452}
453
454int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
455{
456 nq->pdev = pdev;
457 if (!nq->hwq.max_elements ||
458 nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
459 nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
460
461 if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
462 &nq->hwq.max_elements,
463 BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
464 PAGE_SIZE, HWQ_TYPE_L2_CMPL))
465 return -ENOMEM;
466
467 nq->budget = 8;
468 return 0;
469}
470
471/* QP */
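/* Create the special QP1 (GSI) queue pair: allocate the SQ/RQ hardware
 * queues and their shadow (swq) arrays, allocate the QP1 header buffers,
 * and issue the CREATE_QP1 firmware command. On success the QP is
 * registered in the RCFW qp table for later lookups.
 */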
472int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
473{
474 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
475 struct cmdq_create_qp1 req;
 476	struct creq_create_qp1_resp resp;
 477	struct bnxt_qplib_pbl *pbl;
478 struct bnxt_qplib_q *sq = &qp->sq;
479 struct bnxt_qplib_q *rq = &qp->rq;
480 int rc;
481 u16 cmd_flags = 0;
482 u32 qp_flags = 0;
483
484 RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
485
486 /* General */
487 req.type = qp->type;
488 req.dpi = cpu_to_le32(qp->dpi->dpi);
489 req.qp_handle = cpu_to_le64(qp->qp_handle);
490
491 /* SQ */
492 sq->hwq.max_elements = sq->max_wqe;
493 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
494 &sq->hwq.max_elements,
495 BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
496 PAGE_SIZE, HWQ_TYPE_QUEUE);
497 if (rc)
498 goto exit;
499
500 sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
501 if (!sq->swq) {
502 rc = -ENOMEM;
503 goto fail_sq;
504 }
505 pbl = &sq->hwq.pbl[PBL_LVL_0];
506 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
507 req.sq_pg_size_sq_lvl =
508 ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
509 << CMDQ_CREATE_QP1_SQ_LVL_SFT) |
510 (pbl->pg_size == ROCE_PG_SIZE_4K ?
511 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
512 pbl->pg_size == ROCE_PG_SIZE_8K ?
513 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
514 pbl->pg_size == ROCE_PG_SIZE_64K ?
515 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
516 pbl->pg_size == ROCE_PG_SIZE_2M ?
517 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
518 pbl->pg_size == ROCE_PG_SIZE_8M ?
519 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
520 pbl->pg_size == ROCE_PG_SIZE_1G ?
521 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
522 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
523
524 if (qp->scq)
525 req.scq_cid = cpu_to_le32(qp->scq->id);
526
527 qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
528
529 /* RQ */
530 if (rq->max_wqe) {
531 rq->hwq.max_elements = qp->rq.max_wqe;
532 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
533 &rq->hwq.max_elements,
534 BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
535 PAGE_SIZE, HWQ_TYPE_QUEUE);
536 if (rc)
537 goto fail_sq;
538
539 rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
540 GFP_KERNEL);
541 if (!rq->swq) {
542 rc = -ENOMEM;
543 goto fail_rq;
544 }
545 pbl = &rq->hwq.pbl[PBL_LVL_0];
546 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
547 req.rq_pg_size_rq_lvl =
548 ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
549 CMDQ_CREATE_QP1_RQ_LVL_SFT) |
550 (pbl->pg_size == ROCE_PG_SIZE_4K ?
551 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
552 pbl->pg_size == ROCE_PG_SIZE_8K ?
553 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
554 pbl->pg_size == ROCE_PG_SIZE_64K ?
555 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
556 pbl->pg_size == ROCE_PG_SIZE_2M ?
557 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
558 pbl->pg_size == ROCE_PG_SIZE_8M ?
559 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
560 pbl->pg_size == ROCE_PG_SIZE_1G ?
561 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
562 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
563 if (qp->rcq)
564 req.rcq_cid = cpu_to_le32(qp->rcq->id);
565 }
566
 567	/* Header buffer - allow hdr_buf to be passed in */
568 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
569 if (rc) {
570 rc = -ENOMEM;
571 goto fail;
572 }
573 req.qp_flags = cpu_to_le32(qp_flags);
574 req.sq_size = cpu_to_le32(sq->hwq.max_elements);
575 req.rq_size = cpu_to_le32(rq->hwq.max_elements);
576
577 req.sq_fwo_sq_sge =
578 cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
579 CMDQ_CREATE_QP1_SQ_SGE_SFT);
580 req.rq_fwo_rq_sge =
581 cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
582 CMDQ_CREATE_QP1_RQ_SGE_SFT);
583
584 req.pd_id = cpu_to_le32(qp->pd->id);
585
 586	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
587 (void *)&resp, NULL, 0);
588 if (rc)
 589		goto fail;
 590
591 qp->id = le32_to_cpu(resp.xid);
 592	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
 593	rcfw->qp_tbl[qp->id].qp_id = qp->id;
594 rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
 595
596 return 0;
597
598fail:
599 bnxt_qplib_free_qp_hdr_buf(res, qp);
600fail_rq:
601 bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
602 kfree(rq->swq);
603fail_sq:
604 bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
605 kfree(sq->swq);
606exit:
607 return rc;
608}
609
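/* Create a regular RC/UD QP. In addition to the SQ/RQ setup done for QP1,
 * RC QPs get a PSN search area appended to the SQ and ORRQ/IRRQ context
 * queues sized from the requested RDMA read/atomic limits.
 */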
610int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
611{
612 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
613 struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
614 struct cmdq_create_qp req;
 615	struct creq_create_qp_resp resp;
 616	struct bnxt_qplib_pbl *pbl;
617 struct sq_psn_search **psn_search_ptr;
618 unsigned long int psn_search, poff = 0;
619 struct bnxt_qplib_q *sq = &qp->sq;
620 struct bnxt_qplib_q *rq = &qp->rq;
621 struct bnxt_qplib_hwq *xrrq;
622 int i, rc, req_size, psn_sz;
623 u16 cmd_flags = 0, max_ssge;
624 u32 sw_prod, qp_flags = 0;
625
626 RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
627
628 /* General */
629 req.type = qp->type;
630 req.dpi = cpu_to_le32(qp->dpi->dpi);
631 req.qp_handle = cpu_to_le64(qp->qp_handle);
632
633 /* SQ */
634 psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
635 sizeof(struct sq_psn_search) : 0;
636 sq->hwq.max_elements = sq->max_wqe;
637 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
638 sq->nmap, &sq->hwq.max_elements,
639 BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
640 psn_sz,
641 PAGE_SIZE, HWQ_TYPE_QUEUE);
642 if (rc)
643 goto exit;
644
645 sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
646 if (!sq->swq) {
647 rc = -ENOMEM;
648 goto fail_sq;
649 }
650 hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
651 if (psn_sz) {
652 psn_search_ptr = (struct sq_psn_search **)
653 &hw_sq_send_ptr[get_sqe_pg
654 (sq->hwq.max_elements)];
655 psn_search = (unsigned long int)
656 &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
657 [get_sqe_idx(sq->hwq.max_elements)];
658 if (psn_search & ~PAGE_MASK) {
659 /* If the psn_search does not start on a page boundary,
660 * then calculate the offset
661 */
662 poff = (psn_search & ~PAGE_MASK) /
663 BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
664 }
665 for (i = 0; i < sq->hwq.max_elements; i++)
666 sq->swq[i].psn_search =
667 &psn_search_ptr[get_psne_pg(i + poff)]
668 [get_psne_idx(i + poff)];
669 }
670 pbl = &sq->hwq.pbl[PBL_LVL_0];
671 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
672 req.sq_pg_size_sq_lvl =
673 ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
674 << CMDQ_CREATE_QP_SQ_LVL_SFT) |
675 (pbl->pg_size == ROCE_PG_SIZE_4K ?
676 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
677 pbl->pg_size == ROCE_PG_SIZE_8K ?
678 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
679 pbl->pg_size == ROCE_PG_SIZE_64K ?
680 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
681 pbl->pg_size == ROCE_PG_SIZE_2M ?
682 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
683 pbl->pg_size == ROCE_PG_SIZE_8M ?
684 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
685 pbl->pg_size == ROCE_PG_SIZE_1G ?
686 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
687 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
688
689 /* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
690 hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
691 for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
692 hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
693 [get_sqe_idx(sw_prod)];
694 hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
695 }
696
697 if (qp->scq)
698 req.scq_cid = cpu_to_le32(qp->scq->id);
699
700 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
701 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
702 if (qp->sig_type)
703 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
704
705 /* RQ */
706 if (rq->max_wqe) {
707 rq->hwq.max_elements = rq->max_wqe;
708 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
709 rq->nmap, &rq->hwq.max_elements,
710 BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
711 PAGE_SIZE, HWQ_TYPE_QUEUE);
712 if (rc)
713 goto fail_sq;
714
715 rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
716 GFP_KERNEL);
717 if (!rq->swq) {
718 rc = -ENOMEM;
719 goto fail_rq;
720 }
721 pbl = &rq->hwq.pbl[PBL_LVL_0];
722 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
723 req.rq_pg_size_rq_lvl =
724 ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
725 CMDQ_CREATE_QP_RQ_LVL_SFT) |
726 (pbl->pg_size == ROCE_PG_SIZE_4K ?
727 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
728 pbl->pg_size == ROCE_PG_SIZE_8K ?
729 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
730 pbl->pg_size == ROCE_PG_SIZE_64K ?
731 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
732 pbl->pg_size == ROCE_PG_SIZE_2M ?
733 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
734 pbl->pg_size == ROCE_PG_SIZE_8M ?
735 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
736 pbl->pg_size == ROCE_PG_SIZE_1G ?
737 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
738 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
739 }
740
741 if (qp->rcq)
742 req.rcq_cid = cpu_to_le32(qp->rcq->id);
743 req.qp_flags = cpu_to_le32(qp_flags);
744 req.sq_size = cpu_to_le32(sq->hwq.max_elements);
745 req.rq_size = cpu_to_le32(rq->hwq.max_elements);
746 qp->sq_hdr_buf = NULL;
747 qp->rq_hdr_buf = NULL;
748
749 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
750 if (rc)
751 goto fail_rq;
752
753 /* CTRL-22434: Irrespective of the requested SGE count on the SQ
754 * always create the QP with max send sges possible if the requested
755 * inline size is greater than 0.
756 */
757 max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
758 req.sq_fwo_sq_sge = cpu_to_le16(
759 ((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
760 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
761 req.rq_fwo_rq_sge = cpu_to_le16(
762 ((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
763 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
764 /* ORRQ and IRRQ */
765 if (psn_sz) {
766 xrrq = &qp->orrq;
767 xrrq->max_elements =
768 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
769 req_size = xrrq->max_elements *
770 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
771 req_size &= ~(PAGE_SIZE - 1);
772 rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
773 &xrrq->max_elements,
774 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
775 0, req_size, HWQ_TYPE_CTX);
776 if (rc)
777 goto fail_buf_free;
778 pbl = &xrrq->pbl[PBL_LVL_0];
779 req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
780
781 xrrq = &qp->irrq;
782 xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
783 qp->max_dest_rd_atomic);
784 req_size = xrrq->max_elements *
785 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
786 req_size &= ~(PAGE_SIZE - 1);
787
788 rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
789 &xrrq->max_elements,
790 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
791 0, req_size, HWQ_TYPE_CTX);
792 if (rc)
793 goto fail_orrq;
794
795 pbl = &xrrq->pbl[PBL_LVL_0];
796 req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
797 }
798 req.pd_id = cpu_to_le32(qp->pd->id);
799
 800	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
801 (void *)&resp, NULL, 0);
802 if (rc)
 803		goto fail;
 804
805 qp->id = le32_to_cpu(resp.xid);
 806	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
 807	INIT_LIST_HEAD(&qp->sq_flush);
808 INIT_LIST_HEAD(&qp->rq_flush);
809 rcfw->qp_tbl[qp->id].qp_id = qp->id;
810 rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
 811
812 return 0;
813
814fail:
815 if (qp->irrq.max_elements)
816 bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
817fail_orrq:
818 if (qp->orrq.max_elements)
819 bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
820fail_buf_free:
821 bnxt_qplib_free_qp_hdr_buf(res, qp);
822fail_rq:
823 bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
824 kfree(rq->swq);
825fail_sq:
826 bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
827 kfree(sq->swq);
828exit:
829 return rc;
830}
831
832static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
833{
834 switch (qp->state) {
835 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
836 /* INIT->RTR, configure the path_mtu to the default
837 * 2048 if not being requested
838 */
839 if (!(qp->modify_flags &
840 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
841 qp->modify_flags |=
842 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
843 qp->path_mtu =
844 CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
845 }
846 qp->modify_flags &=
847 ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
 848		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
849 if (qp->max_dest_rd_atomic < 1)
850 qp->max_dest_rd_atomic = 1;
851 qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
852 /* Bono FW 20.6.5 requires SGID_INDEX configuration */
853 if (!(qp->modify_flags &
854 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
855 qp->modify_flags |=
856 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
857 qp->ah.sgid_index = 0;
858 }
859 break;
860 default:
861 break;
862 }
863}
864
865static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
866{
867 switch (qp->state) {
868 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
869 /* Bono FW requires the max_rd_atomic to be >= 1 */
870 if (qp->max_rd_atomic < 1)
871 qp->max_rd_atomic = 1;
872 /* Bono FW does not allow PKEY_INDEX,
873 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
874 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
875 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
876 * modification
877 */
878 qp->modify_flags &=
879 ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
880 CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
881 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
882 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
883 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
884 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
885 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
886 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
887 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
888 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
889 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
890 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
891 break;
892 default:
893 break;
894 }
895}
896
897static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
898{
899 switch (qp->cur_qp_state) {
900 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
901 break;
902 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
903 __modify_flags_from_init_state(qp);
904 break;
905 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
906 __modify_flags_from_rtr_state(qp);
907 break;
908 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
909 break;
910 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
911 break;
912 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
913 break;
914 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
915 break;
916 default:
917 break;
918 }
919}
920
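/* Issue MODIFY_QP to the firmware. The modify mask is first filtered by
 * __filter_modify_flags() to drop attributes the Bono firmware does not
 * accept for the current state transition, then each remaining masked
 * attribute is copied into the command.
 */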
921int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
922{
923 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
924 struct cmdq_modify_qp req;
 925	struct creq_modify_qp_resp resp;
 926	u16 cmd_flags = 0, pkey;
927 u32 temp32[4];
928 u32 bmask;
 929	int rc;
 930
931 RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
932
933 /* Filter out the qp_attr_mask based on the state->new transition */
934 __filter_modify_flags(qp);
935 bmask = qp->modify_flags;
936 req.modify_mask = cpu_to_le32(qp->modify_flags);
937 req.qp_cid = cpu_to_le32(qp->id);
938 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
939 req.network_type_en_sqd_async_notify_new_state =
940 (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
941 (qp->en_sqd_async_notify ?
942 CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
943 }
944 req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
945
946 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
947 req.access = qp->access;
948
949 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
950 if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
951 qp->pkey_index, &pkey))
952 req.pkey = cpu_to_le16(pkey);
953 }
954 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
955 req.qkey = cpu_to_le32(qp->qkey);
956
957 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
958 memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
959 req.dgid[0] = cpu_to_le32(temp32[0]);
960 req.dgid[1] = cpu_to_le32(temp32[1]);
961 req.dgid[2] = cpu_to_le32(temp32[2]);
962 req.dgid[3] = cpu_to_le32(temp32[3]);
963 }
964 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
965 req.flow_label = cpu_to_le32(qp->ah.flow_label);
966
967 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
968 req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
969 [qp->ah.sgid_index]);
970
971 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
972 req.hop_limit = qp->ah.hop_limit;
973
974 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
975 req.traffic_class = qp->ah.traffic_class;
976
977 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
978 memcpy(req.dest_mac, qp->ah.dmac, 6);
979
980 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
981 req.path_mtu = qp->path_mtu;
982
983 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
984 req.timeout = qp->timeout;
985
986 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
987 req.retry_cnt = qp->retry_cnt;
988
989 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
990 req.rnr_retry = qp->rnr_retry;
991
992 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
993 req.min_rnr_timer = qp->min_rnr_timer;
994
995 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
996 req.rq_psn = cpu_to_le32(qp->rq.psn);
997
998 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
999 req.sq_psn = cpu_to_le32(qp->sq.psn);
1000
1001 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1002 req.max_rd_atomic =
1003 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1004
1005 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1006 req.max_dest_rd_atomic =
1007 IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1008
1009 req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1010 req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1011 req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1012 req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1013 req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1014 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1015 req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1016
1017 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1018
 1019	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1020 (void *)&resp, NULL, 0);
1021 if (rc)
1022 return rc;
 1023	qp->cur_qp_state = qp->state;
1024 return 0;
1025}
1026
1027int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1028{
1029 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1030 struct cmdq_query_qp req;
 1031	struct creq_query_qp_resp resp;
1032 struct bnxt_qplib_rcfw_sbuf *sbuf;
 1033	struct creq_query_qp_resp_sb *sb;
1034 u16 cmd_flags = 0;
1035 u32 temp32[4];
 1036	int i, rc = 0;
 1037
1038 RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
1039
 1040	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1041 if (!sbuf)
1042 return -ENOMEM;
1043 sb = sbuf->sb;
1044
 1045	req.qp_cid = cpu_to_le32(qp->id);
1046 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
 1047	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
1048 (void *)sbuf, 0);
1049 if (rc)
1050 goto bail;
 1051	/* Extract the context from the side buffer */
1052 qp->state = sb->en_sqd_async_notify_state &
1053 CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1054 qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1055 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1056 true : false;
1057 qp->access = sb->access;
1058 qp->pkey_index = le16_to_cpu(sb->pkey);
1059 qp->qkey = le32_to_cpu(sb->qkey);
1060
1061 temp32[0] = le32_to_cpu(sb->dgid[0]);
1062 temp32[1] = le32_to_cpu(sb->dgid[1]);
1063 temp32[2] = le32_to_cpu(sb->dgid[2]);
1064 temp32[3] = le32_to_cpu(sb->dgid[3]);
1065 memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1066
1067 qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1068
1069 qp->ah.sgid_index = 0;
1070 for (i = 0; i < res->sgid_tbl.max; i++) {
1071 if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1072 qp->ah.sgid_index = i;
1073 break;
1074 }
1075 }
1076 if (i == res->sgid_tbl.max)
1077 dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");
1078
1079 qp->ah.hop_limit = sb->hop_limit;
1080 qp->ah.traffic_class = sb->traffic_class;
1081 memcpy(qp->ah.dmac, sb->dest_mac, 6);
1082 qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1083 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1084 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1085 qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1086 CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1087 CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1088 qp->timeout = sb->timeout;
1089 qp->retry_cnt = sb->retry_cnt;
1090 qp->rnr_retry = sb->rnr_retry;
1091 qp->min_rnr_timer = sb->min_rnr_timer;
1092 qp->rq.psn = le32_to_cpu(sb->rq_psn);
1093 qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1094 qp->sq.psn = le32_to_cpu(sb->sq_psn);
1095 qp->max_dest_rd_atomic =
1096 IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1097 qp->sq.max_wqe = qp->sq.hwq.max_elements;
1098 qp->rq.max_wqe = qp->rq.hwq.max_elements;
1099 qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1100 qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1101 qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1102 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1103 memcpy(qp->smac, sb->src_mac, 6);
1104 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
 1105bail:
1106 bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
1107 return rc;
 1108}
1109
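/* Walk every valid CQE in the CQ and clear the qp_handle of entries that
 * belong to the given QP so no stale completions are reported once the QP
 * is destroyed or flushed.
 */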
1110static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1111{
1112 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1113 struct cq_base *hw_cqe, **hw_cqe_ptr;
1114 int i;
1115
1116 for (i = 0; i < cq_hwq->max_elements; i++) {
1117 hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
1118 hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
1119 if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1120 continue;
 1121		/*
 1122		 * The valid bit of the entry must be checked before reading
 1123		 * any further fields.
1124 */
1125 dma_rmb();
 1126		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1127 case CQ_BASE_CQE_TYPE_REQ:
1128 case CQ_BASE_CQE_TYPE_TERMINAL:
1129 {
1130 struct cq_req *cqe = (struct cq_req *)hw_cqe;
1131
1132 if (qp == le64_to_cpu(cqe->qp_handle))
1133 cqe->qp_handle = 0;
1134 break;
1135 }
1136 case CQ_BASE_CQE_TYPE_RES_RC:
1137 case CQ_BASE_CQE_TYPE_RES_UD:
1138 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1139 {
1140 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1141
1142 if (qp == le64_to_cpu(cqe->qp_handle))
1143 cqe->qp_handle = 0;
1144 break;
1145 }
1146 default:
1147 break;
1148 }
1149 }
1150}
1151
1152int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1153 struct bnxt_qplib_qp *qp)
1154{
1155 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1156 struct cmdq_destroy_qp req;
 1157	struct creq_destroy_qp_resp resp;
 1158	unsigned long flags;
1159 u16 cmd_flags = 0;
 1160	int rc;
 1161
 1162	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1163 rcfw->qp_tbl[qp->id].qp_handle = NULL;
1164
 1165	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1166
1167 req.qp_cid = cpu_to_le32(qp->id);
 1168	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1169 (void *)&resp, NULL, 0);
 1170	if (rc) {
1171 rcfw->qp_tbl[qp->id].qp_id = qp->id;
1172 rcfw->qp_tbl[qp->id].qp_handle = qp;
 1173		return rc;
 1174	}
 1175
 1176	/* Must walk the associated CQs to nullify the QP ptr */
1177 spin_lock_irqsave(&qp->scq->hwq.lock, flags);
1178
1179 __clean_cq(qp->scq, (u64)(unsigned long)qp);
1180
1181 if (qp->rcq && qp->rcq != qp->scq) {
1182 spin_lock(&qp->rcq->hwq.lock);
1183 __clean_cq(qp->rcq, (u64)(unsigned long)qp);
1184 spin_unlock(&qp->rcq->hwq.lock);
1185 }
1186
1187 spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
1188
1189 bnxt_qplib_free_qp_hdr_buf(res, qp);
1190 bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
1191 kfree(qp->sq.swq);
1192
1193 bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
1194 kfree(qp->rq.swq);
1195
1196 if (qp->irrq.max_elements)
1197 bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1198 if (qp->orrq.max_elements)
1199 bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1200
1201 return 0;
1202}
1203
1204void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1205 struct bnxt_qplib_sge *sge)
1206{
1207 struct bnxt_qplib_q *sq = &qp->sq;
1208 u32 sw_prod;
1209
1210 memset(sge, 0, sizeof(*sge));
1211
1212 if (qp->sq_hdr_buf) {
1213 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1214 sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1215 sw_prod * qp->sq_hdr_buf_size);
1216 sge->lkey = 0xFFFFFFFF;
1217 sge->size = qp->sq_hdr_buf_size;
1218 return qp->sq_hdr_buf + sw_prod * sge->size;
1219 }
1220 return NULL;
1221}
1222
1223u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1224{
1225 struct bnxt_qplib_q *rq = &qp->rq;
1226
1227 return HWQ_CMP(rq->hwq.prod, &rq->hwq);
1228}
1229
1230dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1231{
1232 return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1233}
1234
1235void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1236 struct bnxt_qplib_sge *sge)
1237{
1238 struct bnxt_qplib_q *rq = &qp->rq;
1239 u32 sw_prod;
1240
1241 memset(sge, 0, sizeof(*sge));
1242
1243 if (qp->rq_hdr_buf) {
1244 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1245 sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1246 sw_prod * qp->rq_hdr_buf_size);
1247 sge->lkey = 0xFFFFFFFF;
1248 sge->size = qp->rq_hdr_buf_size;
1249 return qp->rq_hdr_buf + sw_prod * sge->size;
1250 }
1251 return NULL;
1252}
1253
1254void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1255{
1256 struct bnxt_qplib_q *sq = &qp->sq;
1257 struct dbr_dbr db_msg = { 0 };
1258 u32 sw_prod;
1259
1260 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1261
1262 db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1263 DBR_DBR_INDEX_MASK);
1264 db_msg.type_xid =
1265 cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1266 DBR_DBR_TYPE_SQ);
1267 /* Flush all the WQE writes to HW */
1268 wmb();
1269 __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1270}
1271
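/* Post one send work request. The generic WQE is translated into the
 * hardware-specific SQE format (send, RDMA, atomic, local invalidate,
 * fast-register or bind), the PSN search entry is updated for RC traffic,
 * and the software producer index is advanced. If the QP is already in
 * the error state the request is only recorded in the shadow queue and a
 * work item is queued so it is flushed through the CQ handler.
 */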
1272int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1273 struct bnxt_qplib_swqe *wqe)
1274{
1275 struct bnxt_qplib_q *sq = &qp->sq;
1276 struct bnxt_qplib_swq *swq;
1277 struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
1278 struct sq_sge *hw_sge;
 1279	struct bnxt_qplib_nq_work *nq_work = NULL;
1280 bool sch_handler = false;
 1281	u32 sw_prod;
1282 u8 wqe_size16;
1283 int i, rc = 0, data_len = 0, pkt_num = 0;
1284 __le32 temp32;
1285
1286 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
 1287		if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1288 sch_handler = true;
1289 dev_dbg(&sq->hwq.pdev->dev,
1290 "%s Error QP. Scheduling for poll_cq\n",
1291 __func__);
1292 goto queue_err;
1293 }
 1294	}
 1295
1296 if (bnxt_qplib_queue_full(sq)) {
1297 dev_err(&sq->hwq.pdev->dev,
1298 "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
1299 sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1300 sq->q_full_delta);
 1301		rc = -ENOMEM;
1302 goto done;
1303 }
1304 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1305 swq = &sq->swq[sw_prod];
1306 swq->wr_id = wqe->wr_id;
1307 swq->type = wqe->type;
1308 swq->flags = wqe->flags;
1309 if (qp->sig_type)
1310 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1311 swq->start_psn = sq->psn & BTH_PSN_MASK;
1312
1313 hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
1314 hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
1315 [get_sqe_idx(sw_prod)];
1316
1317 memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
1318
1319 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1320 /* Copy the inline data */
1321 if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1322 dev_warn(&sq->hwq.pdev->dev,
1323 "QPLIB: Inline data length > 96 detected");
1324 data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
1325 } else {
1326 data_len = wqe->inline_len;
1327 }
1328 memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
1329 wqe_size16 = (data_len + 15) >> 4;
1330 } else {
1331 for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
1332 i < wqe->num_sge; i++, hw_sge++) {
1333 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1334 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1335 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1336 data_len += wqe->sg_list[i].size;
1337 }
1338 /* Each SGE entry = 1 WQE size16 */
1339 wqe_size16 = wqe->num_sge;
 1340		/* HW requires the WQE size to have room for at least one SGE
 1341		 * even if none was supplied by the ULP
1342 */
1343 if (!wqe->num_sge)
1344 wqe_size16++;
 1345	}
1346
1347 /* Specifics */
1348 switch (wqe->type) {
1349 case BNXT_QPLIB_SWQE_TYPE_SEND:
1350 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1351 /* Assemble info for Raw Ethertype QPs */
1352 struct sq_send_raweth_qp1 *sqe =
1353 (struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
1354
1355 sqe->wqe_type = wqe->type;
1356 sqe->flags = wqe->flags;
1357 sqe->wqe_size = wqe_size16 +
1358 ((offsetof(typeof(*sqe), data) + 15) >> 4);
1359 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1360 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1361 sqe->length = cpu_to_le32(data_len);
1362 sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1363 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1364 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1365
1366 break;
1367 }
 1368		/* fall thru */
 1369	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1370 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1371 {
1372 struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1373
1374 sqe->wqe_type = wqe->type;
1375 sqe->flags = wqe->flags;
1376 sqe->wqe_size = wqe_size16 +
1377 ((offsetof(typeof(*sqe), data) + 15) >> 4);
1378 sqe->inv_key_or_imm_data = cpu_to_le32(
1379 wqe->send.inv_key);
1380 if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
1381 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1382 sqe->dst_qp = cpu_to_le32(
1383 wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
1384 sqe->length = cpu_to_le32(data_len);
1385 sqe->avid = cpu_to_le32(wqe->send.avid &
1386 SQ_SEND_AVID_MASK);
1387 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1388 } else {
1389 sqe->length = cpu_to_le32(data_len);
1390 sqe->dst_qp = 0;
1391 sqe->avid = 0;
1392 if (qp->mtu)
1393 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1394 if (!pkt_num)
1395 pkt_num = 1;
1396 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1397 }
1398 break;
1399 }
1400 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1401 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1402 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1403 {
1404 struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
1405
1406 sqe->wqe_type = wqe->type;
1407 sqe->flags = wqe->flags;
1408 sqe->wqe_size = wqe_size16 +
1409 ((offsetof(typeof(*sqe), data) + 15) >> 4);
1410 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1411 sqe->length = cpu_to_le32((u32)data_len);
1412 sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1413 sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1414 if (qp->mtu)
1415 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1416 if (!pkt_num)
1417 pkt_num = 1;
1418 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1419 break;
1420 }
1421 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1422 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1423 {
1424 struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
1425
1426 sqe->wqe_type = wqe->type;
1427 sqe->flags = wqe->flags;
1428 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1429 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1430 sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1431 sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1432 if (qp->mtu)
1433 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1434 if (!pkt_num)
1435 pkt_num = 1;
1436 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1437 break;
1438 }
1439 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1440 {
1441 struct sq_localinvalidate *sqe =
1442 (struct sq_localinvalidate *)hw_sq_send_hdr;
1443
1444 sqe->wqe_type = wqe->type;
1445 sqe->flags = wqe->flags;
1446 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1447
1448 break;
1449 }
1450 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1451 {
1452 struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
1453
1454 sqe->wqe_type = wqe->type;
1455 sqe->flags = wqe->flags;
1456 sqe->access_cntl = wqe->frmr.access_cntl |
1457 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1458 sqe->zero_based_page_size_log =
1459 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1460 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1461 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1462 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1463 temp32 = cpu_to_le32(wqe->frmr.length);
1464 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1465 sqe->numlevels_pbl_page_size_log =
1466 ((wqe->frmr.pbl_pg_sz_log <<
1467 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1468 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1469 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1470 SQ_FR_PMR_NUMLEVELS_MASK);
1471
1472 for (i = 0; i < wqe->frmr.page_list_len; i++)
1473 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1474 wqe->frmr.page_list[i] |
1475 PTU_PTE_VALID);
1476 sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1477 sqe->va = cpu_to_le64(wqe->frmr.va);
1478
1479 break;
1480 }
1481 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1482 {
1483 struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
1484
1485 sqe->wqe_type = wqe->type;
1486 sqe->flags = wqe->flags;
1487 sqe->access_cntl = wqe->bind.access_cntl;
1488 sqe->mw_type_zero_based = wqe->bind.mw_type |
1489 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1490 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1491 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1492 sqe->va = cpu_to_le64(wqe->bind.va);
1493 temp32 = cpu_to_le32(wqe->bind.length);
1494 memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
1495 break;
1496 }
1497 default:
1498 /* Bad wqe, return error */
1499 rc = -EINVAL;
1500 goto done;
1501 }
1502 swq->next_psn = sq->psn & BTH_PSN_MASK;
1503 if (swq->psn_search) {
1504 swq->psn_search->opcode_start_psn = cpu_to_le32(
1505 ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1506 SQ_PSN_SEARCH_START_PSN_MASK) |
1507 ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1508 SQ_PSN_SEARCH_OPCODE_MASK));
1509 swq->psn_search->flags_next_psn = cpu_to_le32(
1510 ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1511 SQ_PSN_SEARCH_NEXT_PSN_MASK));
1512 }
 1513queue_err:
1514 if (sch_handler) {
1515 /* Store the ULP info in the software structures */
1516 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1517 swq = &sq->swq[sw_prod];
1518 swq->wr_id = wqe->wr_id;
1519 swq->type = wqe->type;
1520 swq->flags = wqe->flags;
1521 if (qp->sig_type)
1522 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1523 swq->start_psn = sq->psn & BTH_PSN_MASK;
1524 }
 1525	sq->hwq.prod++;
 1526	qp->wqe_cnt++;
1527
 1528done:
 1529	if (sch_handler) {
1530 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1531 if (nq_work) {
1532 nq_work->cq = qp->scq;
1533 nq_work->nq = qp->scq->nq;
1534 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1535 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1536 } else {
1537 dev_err(&sq->hwq.pdev->dev,
1538 "QPLIB: FP: Failed to allocate SQ nq_work!");
1539 rc = -ENOMEM;
1540 }
1541 }
 1542	return rc;
1543}
1544
1545void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1546{
1547 struct bnxt_qplib_q *rq = &qp->rq;
1548 struct dbr_dbr db_msg = { 0 };
1549 u32 sw_prod;
1550
1551 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1552 db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
1553 DBR_DBR_INDEX_MASK);
1554 db_msg.type_xid =
1555 cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1556 DBR_DBR_TYPE_RQ);
1557
1558 /* Flush the writes to HW Rx WQE before the ringing Rx DB */
1559 wmb();
1560 __iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1561}
1562
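/* Post one receive work request: build the RQE from the supplied SGL and
 * advance the RQ producer. As on the send path, requests posted while the
 * QP is in the error state are only captured in the shadow queue and
 * scheduled for flush completion.
 */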
1563int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1564 struct bnxt_qplib_swqe *wqe)
1565{
1566 struct bnxt_qplib_q *rq = &qp->rq;
1567 struct rq_wqe *rqe, **rqe_ptr;
1568 struct sq_sge *hw_sge;
 1569	struct bnxt_qplib_nq_work *nq_work = NULL;
1570 bool sch_handler = false;
 1571	u32 sw_prod;
1572 int i, rc = 0;
1573
1574 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
 1575		sch_handler = true;
1576 dev_dbg(&rq->hwq.pdev->dev,
1577 "%s Error QP. Scheduling for poll_cq\n",
1578 __func__);
1579 goto queue_err;
 1580	}
 1581	if (bnxt_qplib_queue_full(rq)) {
 1582		dev_err(&rq->hwq.pdev->dev,
1583 "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
1584 rc = -EINVAL;
1585 goto done;
1586 }
1587 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1588 rq->swq[sw_prod].wr_id = wqe->wr_id;
1589
1590 rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1591 rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1592
1593 memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1594
1595 /* Calculate wqe_size16 and data_len */
1596 for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1597 i < wqe->num_sge; i++, hw_sge++) {
1598 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1599 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1600 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1601 }
1602 rqe->wqe_type = wqe->type;
1603 rqe->flags = wqe->flags;
1604 rqe->wqe_size = wqe->num_sge +
1605 ((offsetof(typeof(*rqe), data) + 15) >> 4);
 1606	/* HW requires the WQE size to have room for at least one SGE
 1607	 * even if none was supplied by the ULP
1608 */
1609 if (!wqe->num_sge)
1610 rqe->wqe_size++;
 1611
1612 /* Supply the rqe->wr_id index to the wr_id_tbl for now */
1613 rqe->wr_id[0] = cpu_to_le32(sw_prod);
1614
 1615queue_err:
1616 if (sch_handler) {
1617 /* Store the ULP info in the software structures */
1618 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1619 rq->swq[sw_prod].wr_id = wqe->wr_id;
1620 }
1621
 1622	rq->hwq.prod++;
 1623	if (sch_handler) {
1624 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1625 if (nq_work) {
1626 nq_work->cq = qp->rcq;
1627 nq_work->nq = qp->rcq->nq;
1628 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1629 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
1630 } else {
1631 dev_err(&rq->hwq.pdev->dev,
1632 "QPLIB: FP: Failed to allocate RQ nq_work!");
1633 rc = -ENOMEM;
1634 }
1635 }
 1636done:
1637 return rc;
1638}
1639
1640/* CQ */
1641
1642/* Spinlock must be held */
1643static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
1644{
1645 struct dbr_dbr db_msg = { 0 };
1646
1647 db_msg.type_xid =
1648 cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1649 DBR_DBR_TYPE_CQ_ARMENA);
1650 /* Flush memory writes before enabling the CQ */
1651 wmb();
1652 __iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
1653}
1654
1655static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
1656{
1657 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1658 struct dbr_dbr db_msg = { 0 };
1659 u32 sw_cons;
1660
1661 /* Ring DB */
1662 sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
1663 db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
1664 DBR_DBR_INDEX_MASK);
1665 db_msg.type_xid =
1666 cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
1667 arm_type);
1668 /* flush memory writes before arming the CQ */
1669 wmb();
1670 __iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
1671}
1672
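/* Create a completion queue: allocate the CQ hardware queue, issue the
 * CREATE_CQ firmware command bound to the given CNQ ring, initialise the
 * SQ/RQ flush lists and arm the CQ for notifications.
 */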
1673int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1674{
1675 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1676 struct cmdq_create_cq req;
 1677	struct creq_create_cq_resp resp;
 1678	struct bnxt_qplib_pbl *pbl;
1679 u16 cmd_flags = 0;
1680 int rc;
1681
1682 cq->hwq.max_elements = cq->max_wqe;
1683 rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
1684 cq->nmap, &cq->hwq.max_elements,
1685 BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1686 PAGE_SIZE, HWQ_TYPE_QUEUE);
1687 if (rc)
1688 goto exit;
1689
1690 RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
1691
1692 if (!cq->dpi) {
1693 dev_err(&rcfw->pdev->dev,
1694 "QPLIB: FP: CREATE_CQ failed due to NULL DPI");
 1695		rc = -EINVAL;
		goto fail;
1696 }
1697 req.dpi = cpu_to_le32(cq->dpi->dpi);
1698 req.cq_handle = cpu_to_le64(cq->cq_handle);
1699
1700 req.cq_size = cpu_to_le32(cq->hwq.max_elements);
1701 pbl = &cq->hwq.pbl[PBL_LVL_0];
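	/* Encode the PBL indirection level and the level-0 page size into the
	 * command; unrecognized page sizes fall back to 4K.
	 */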
1702 req.pg_size_lvl = cpu_to_le32(
1703 ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1704 CMDQ_CREATE_CQ_LVL_SFT) |
1705 (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1706 pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1707 pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1708 pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1709 pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1710 pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1711 CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
1712
1713 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1714
1715 req.cq_fco_cnq_id = cpu_to_le32(
1716 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1717 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1718
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001719 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1720 (void *)&resp, NULL, 0);
1721 if (rc)
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001722 goto fail;
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001723
1724 cq->id = le32_to_cpu(resp.xid);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001725 cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1726 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1727 init_waitqueue_head(&cq->waitq);
Selvin Xavierf218d672017-06-29 12:28:15 -07001728 INIT_LIST_HEAD(&cq->sqf_head);
1729 INIT_LIST_HEAD(&cq->rqf_head);
1730 spin_lock_init(&cq->compl_lock);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001731
1732 bnxt_qplib_arm_cq_enable(cq);
1733 return 0;
1734
1735fail:
1736 bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1737exit:
1738 return rc;
1739}
1740
1741int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1742{
1743 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1744 struct cmdq_destroy_cq req;
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001745 struct creq_destroy_cq_resp resp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001746 u16 cmd_flags = 0;
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001747 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001748
1749 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
1750
1751 req.cq_cid = cpu_to_le32(cq->id);
Devesh Sharmacc1ec762017-05-22 03:15:31 -07001752 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1753 (void *)&resp, NULL, 0);
1754 if (rc)
1755 return rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001756 bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1757 return 0;
1758}
1759
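/* Complete every outstanding SQE between the SQ's consumer and producer with
 * a WORK_REQUEST_FLUSHED_ERR CQE, skipping fence WQEs (BNXT_QPLIB_FENCE_WRID).
 * Stops early with -EAGAIN when the caller's CQE budget is exhausted.
 */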
1760static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
1761 struct bnxt_qplib_cqe **pcqe, int *budget)
1762{
1763 u32 sw_prod, sw_cons;
1764 struct bnxt_qplib_cqe *cqe;
1765 int rc = 0;
1766
1767 /* Now complete all outstanding SQEs with FLUSHED_ERR */
1768 sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1769 cqe = *pcqe;
1770 while (*budget) {
1771 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
1772 if (sw_cons == sw_prod) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001773 break;
1774 }
Selvin Xavierf218d672017-06-29 12:28:15 -07001775 /* Skip the FENCE WQE completions */
1776 if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
1777 bnxt_qplib_cancel_phantom_processing(qp);
1778 goto skip_compl;
1779 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001780 memset(cqe, 0, sizeof(*cqe));
1781 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
1782 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
1783 cqe->qp_handle = (u64)(unsigned long)qp;
1784 cqe->wr_id = sq->swq[sw_cons].wr_id;
1785 cqe->src_qp = qp->id;
1786 cqe->type = sq->swq[sw_cons].type;
1787 cqe++;
1788 (*budget)--;
Selvin Xavierf218d672017-06-29 12:28:15 -07001789skip_compl:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001790 sq->hwq.cons++;
1791 }
1792 *pcqe = cqe;
1793 if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
1794 /* Out of budget */
1795 rc = -EAGAIN;
1796
1797 return rc;
1798}
1799
1800static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
Selvin Xavierf218d672017-06-29 12:28:15 -07001801 struct bnxt_qplib_cqe **pcqe, int *budget)
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001802{
1803 struct bnxt_qplib_cqe *cqe;
1804 u32 sw_prod, sw_cons;
1805 int rc = 0;
Selvin Xavierf218d672017-06-29 12:28:15 -07001806 int opcode = 0;
1807
1808 switch (qp->type) {
1809 case CMDQ_CREATE_QP1_TYPE_GSI:
1810 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
1811 break;
1812 case CMDQ_CREATE_QP_TYPE_RC:
1813 opcode = CQ_BASE_CQE_TYPE_RES_RC;
1814 break;
1815 case CMDQ_CREATE_QP_TYPE_UD:
1816 opcode = CQ_BASE_CQE_TYPE_RES_UD;
1817 break;
1818 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001819
1820 /* Flush the rest of the RQ */
1821 sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1822 cqe = *pcqe;
1823 while (*budget) {
1824 sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
1825 if (sw_cons == sw_prod)
1826 break;
1827 memset(cqe, 0, sizeof(*cqe));
1828 cqe->status =
1829 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
1830 cqe->opcode = opcode;
1831 cqe->qp_handle = (unsigned long)qp;
1832 cqe->wr_id = rq->swq[sw_cons].wr_id;
1833 cqe++;
1834 (*budget)--;
1835 rq->hwq.cons++;
1836 }
1837 *pcqe = cqe;
1838 if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
1839 /* Out of budget */
1840 rc = -EAGAIN;
1841
1842 return rc;
1843}
1844
Selvin Xavierf218d672017-06-29 12:28:15 -07001845void bnxt_qplib_mark_qp_error(void *qp_handle)
1846{
1847 struct bnxt_qplib_qp *qp = qp_handle;
1848
1849 if (!qp)
1850 return;
1851
1852 /* Must block new posting of SQ and RQ */
1853 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
1854 bnxt_qplib_cancel_phantom_processing(qp);
1855
1856 /* Add qp to flush list of the CQ */
1857 __bnxt_qplib_add_flush_qp(qp);
1858}
1859
Eddie Wai9152e0b2017-06-14 03:26:23 -07001860/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 1861 * CQE is tracked from sw_cq_cons to max_elements but is valid only if VALID=1
1862 */
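/* WA 9060: when a send WQE whose psn_search entry is marked completes, its
 * completion must be held back until the phantom (fence) WQE's CQE shows up
 * on this CQ.  The first pass clears the mark, re-arms the CQ and returns
 * -EAGAIN; later passes peek ahead in the CQ for a REQ CQE whose consumer
 * index points at the fence WQE and, once it is seen, resume completions in
 * single-CQE mode.
 */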
1863static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
1864 u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
1865{
1866 struct bnxt_qplib_q *sq = &qp->sq;
1867 struct bnxt_qplib_swq *swq;
1868 u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
1869 struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
1870 struct cq_req *peek_req_hwcqe;
1871 struct bnxt_qplib_qp *peek_qp;
1872 struct bnxt_qplib_q *peek_sq;
1873 int i, rc = 0;
1874
1875 /* Normal mode */
1876 /* Check for the psn_search marking before completing */
1877 swq = &sq->swq[sw_sq_cons];
1878 if (swq->psn_search &&
1879 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
1880 /* Unmark */
1881 swq->psn_search->flags_next_psn = cpu_to_le32
1882 (le32_to_cpu(swq->psn_search->flags_next_psn)
1883 & ~0x80000000);
1884 dev_dbg(&cq->hwq.pdev->dev,
1885 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
1886 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1887 sq->condition = true;
1888 sq->send_phantom = true;
1889
1890 /* TODO: Only ARM if the previous SQE is ARMALL */
1891 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
1892
1893 rc = -EAGAIN;
1894 goto out;
1895 }
1896 if (sq->condition) {
1897 /* Peek at the completions */
1898 peek_raw_cq_cons = cq->hwq.cons;
1899 peek_sw_cq_cons = cq_cons;
1900 i = cq->hwq.max_elements;
1901 while (i--) {
1902 peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
1903 peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
1904 peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
1905 [CQE_IDX(peek_sw_cq_cons)];
1906 /* If the next hwcqe is VALID */
1907 if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
1908 cq->hwq.max_elements)) {
Somnath Kotur9b401832017-11-06 08:07:29 -08001909 /*
1910 * The valid test of the entry must be done first before
1911 * reading any further.
1912 */
1913 dma_rmb();
Eddie Wai9152e0b2017-06-14 03:26:23 -07001914 /* If the next hwcqe is a REQ */
1915 if ((peek_hwcqe->cqe_type_toggle &
1916 CQ_BASE_CQE_TYPE_MASK) ==
1917 CQ_BASE_CQE_TYPE_REQ) {
1918 peek_req_hwcqe = (struct cq_req *)
1919 peek_hwcqe;
1920 peek_qp = (struct bnxt_qplib_qp *)
1921 ((unsigned long)
1922 le64_to_cpu
1923 (peek_req_hwcqe->qp_handle));
1924 peek_sq = &peek_qp->sq;
1925 peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
1926 peek_req_hwcqe->sq_cons_idx) - 1
1927 , &sq->hwq);
1928 /* If the hwcqe's sq's wr_id matches */
1929 if (peek_sq == sq &&
1930 sq->swq[peek_sq_cons_idx].wr_id ==
1931 BNXT_QPLIB_FENCE_WRID) {
1932 /*
1933 * Unbreak only if the phantom
1934 * comes back
1935 */
1936 dev_dbg(&cq->hwq.pdev->dev,
 1937				"FP: Got Phantom CQE");
1938 sq->condition = false;
1939 sq->single = true;
1940 rc = 0;
1941 goto out;
1942 }
1943 }
1944 /* Valid but not the phantom, so keep looping */
1945 } else {
1946 /* Not valid yet, just exit and wait */
1947 rc = -EINVAL;
1948 goto out;
1949 }
1950 peek_sw_cq_cons++;
1951 peek_raw_cq_cons++;
1952 }
1953 dev_err(&cq->hwq.pdev->dev,
1954 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
1955 cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
1956 rc = -EINVAL;
1957 }
1958out:
1959 return rc;
1960}
1961
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001962static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
1963 struct cq_req *hwcqe,
Eddie Wai9152e0b2017-06-14 03:26:23 -07001964 struct bnxt_qplib_cqe **pcqe, int *budget,
1965 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001966{
1967 struct bnxt_qplib_qp *qp;
1968 struct bnxt_qplib_q *sq;
1969 struct bnxt_qplib_cqe *cqe;
Eddie Wai9152e0b2017-06-14 03:26:23 -07001970 u32 sw_sq_cons, cqe_sq_cons;
1971 struct bnxt_qplib_swq *swq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001972 int rc = 0;
1973
1974 qp = (struct bnxt_qplib_qp *)((unsigned long)
1975 le64_to_cpu(hwcqe->qp_handle));
1976 if (!qp) {
1977 dev_err(&cq->hwq.pdev->dev,
1978 "QPLIB: FP: Process Req qp is NULL");
1979 return -EINVAL;
1980 }
1981 sq = &qp->sq;
1982
Eddie Wai9152e0b2017-06-14 03:26:23 -07001983 cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
1984 if (cqe_sq_cons > sq->hwq.max_elements) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001985 dev_err(&cq->hwq.pdev->dev,
1986 "QPLIB: FP: CQ Process req reported ");
1987 dev_err(&cq->hwq.pdev->dev,
1988 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
Eddie Wai9152e0b2017-06-14 03:26:23 -07001989 cqe_sq_cons, sq->hwq.max_elements);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001990 return -EINVAL;
1991 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001992
Selvin Xavierf218d672017-06-29 12:28:15 -07001993 if (qp->sq.flushed) {
1994 dev_dbg(&cq->hwq.pdev->dev,
1995 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
1996 goto done;
1997 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001998	/* We need to walk the sq's swq to fabricate CQEs for all previously
1999 * signaled SWQEs due to CQE aggregation from the current sq cons
Eddie Wai9152e0b2017-06-14 03:26:23 -07002000 * to the cqe_sq_cons
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002001 */
2002 cqe = *pcqe;
2003 while (*budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002004 sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2005 if (sw_sq_cons == cqe_sq_cons)
2006 /* Done */
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002007 break;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002008
2009 swq = &sq->swq[sw_sq_cons];
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002010 memset(cqe, 0, sizeof(*cqe));
2011 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2012 cqe->qp_handle = (u64)(unsigned long)qp;
2013 cqe->src_qp = qp->id;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002014 cqe->wr_id = swq->wr_id;
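		/* A fence WQE (BNXT_QPLIB_FENCE_WRID) is not reported to the
		 * ULP; skip it and keep walking the SQ.
		 */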
2015 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2016 goto skip;
2017 cqe->type = swq->type;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002018
2019 /* For the last CQE, check for status. For errors, regardless
2020 * of the request being signaled or not, it must complete with
2021 * the hwcqe error status
2022 */
Eddie Wai9152e0b2017-06-14 03:26:23 -07002023 if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002024 hwcqe->status != CQ_REQ_STATUS_OK) {
2025 cqe->status = hwcqe->status;
2026 dev_err(&cq->hwq.pdev->dev,
2027 "QPLIB: FP: CQ Processed Req ");
2028 dev_err(&cq->hwq.pdev->dev,
2029 "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
Eddie Wai9152e0b2017-06-14 03:26:23 -07002030 sw_sq_cons, cqe->wr_id, cqe->status);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002031 cqe++;
2032 (*budget)--;
Selvin Xavierf218d672017-06-29 12:28:15 -07002033 bnxt_qplib_lock_buddy_cq(qp, cq);
2034 bnxt_qplib_mark_qp_error(qp);
2035 bnxt_qplib_unlock_buddy_cq(qp, cq);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002036 } else {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002037 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2038 /* Before we complete, do WA 9060 */
2039 if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2040 cqe_sq_cons)) {
2041 *lib_qp = qp;
2042 goto out;
2043 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002044 cqe->status = CQ_REQ_STATUS_OK;
2045 cqe++;
2046 (*budget)--;
2047 }
2048 }
Eddie Wai9152e0b2017-06-14 03:26:23 -07002049skip:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002050 sq->hwq.cons++;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002051 if (sq->single)
2052 break;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002053 }
Eddie Wai9152e0b2017-06-14 03:26:23 -07002054out:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002055 *pcqe = cqe;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002056 if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002057 /* Out of budget */
2058 rc = -EAGAIN;
2059 goto done;
2060 }
Eddie Wai9152e0b2017-06-14 03:26:23 -07002061 /*
2062 * Back to normal completion mode only after it has completed all of
2063 * the WC for this CQE
2064 */
2065 sq->single = false;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002066done:
2067 return rc;
2068}
2069
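/* Translate an RC responder CQE into a qplib CQE.  On a bad hardware status
 * the QP is moved to the error state and added to the CQ's flush list so the
 * remaining RQEs complete with flush errors.
 */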
2070static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2071 struct cq_res_rc *hwcqe,
2072 struct bnxt_qplib_cqe **pcqe,
2073 int *budget)
2074{
2075 struct bnxt_qplib_qp *qp;
2076 struct bnxt_qplib_q *rq;
2077 struct bnxt_qplib_cqe *cqe;
2078 u32 wr_id_idx;
2079 int rc = 0;
2080
2081 qp = (struct bnxt_qplib_qp *)((unsigned long)
2082 le64_to_cpu(hwcqe->qp_handle));
2083 if (!qp) {
2084 dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
2085 return -EINVAL;
2086 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002087 if (qp->rq.flushed) {
2088 dev_dbg(&cq->hwq.pdev->dev,
2089 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2090 goto done;
2091 }
2092
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002093 cqe = *pcqe;
2094 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2095 cqe->length = le32_to_cpu(hwcqe->length);
2096 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2097 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2098 cqe->flags = le16_to_cpu(hwcqe->flags);
2099 cqe->status = hwcqe->status;
2100 cqe->qp_handle = (u64)(unsigned long)qp;
2101
2102 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2103 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2104 rq = &qp->rq;
2105 if (wr_id_idx > rq->hwq.max_elements) {
2106 dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
2107 dev_err(&cq->hwq.pdev->dev,
2108 "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
2109 wr_id_idx, rq->hwq.max_elements);
2110 return -EINVAL;
2111 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002112
2113 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2114 cqe++;
2115 (*budget)--;
2116 rq->hwq.cons++;
2117 *pcqe = cqe;
2118
2119 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
Selvin Xavier237379f2017-11-06 08:07:30 -08002120 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
Selvin Xavierf218d672017-06-29 12:28:15 -07002121 /* Add qp to flush list of the CQ */
2122 bnxt_qplib_lock_buddy_cq(qp, cq);
2123 __bnxt_qplib_add_flush_qp(qp);
2124 bnxt_qplib_unlock_buddy_cq(qp, cq);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002125 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002126
2127done:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002128 return rc;
2129}
2130
2131static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2132 struct cq_res_ud *hwcqe,
2133 struct bnxt_qplib_cqe **pcqe,
2134 int *budget)
2135{
2136 struct bnxt_qplib_qp *qp;
2137 struct bnxt_qplib_q *rq;
2138 struct bnxt_qplib_cqe *cqe;
2139 u32 wr_id_idx;
2140 int rc = 0;
2141
2142 qp = (struct bnxt_qplib_qp *)((unsigned long)
2143 le64_to_cpu(hwcqe->qp_handle));
2144 if (!qp) {
2145 dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
2146 return -EINVAL;
2147 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002148 if (qp->rq.flushed) {
2149 dev_dbg(&cq->hwq.pdev->dev,
2150 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2151 goto done;
2152 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002153 cqe = *pcqe;
2154 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2155 cqe->length = le32_to_cpu(hwcqe->length);
2156 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2157 cqe->flags = le16_to_cpu(hwcqe->flags);
2158 cqe->status = hwcqe->status;
2159 cqe->qp_handle = (u64)(unsigned long)qp;
2160 memcpy(cqe->smac, hwcqe->src_mac, 6);
2161 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2162 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2163 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2164 ((le32_to_cpu(
2165 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2166 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2167
2168 rq = &qp->rq;
2169 if (wr_id_idx > rq->hwq.max_elements) {
2170 dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
2171 dev_err(&cq->hwq.pdev->dev,
2172 "QPLIB: wr_id idx %#x exceeded RQ max %#x",
2173 wr_id_idx, rq->hwq.max_elements);
2174 return -EINVAL;
2175 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002176
2177 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2178 cqe++;
2179 (*budget)--;
2180 rq->hwq.cons++;
2181 *pcqe = cqe;
2182
2183 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
Selvin Xavier237379f2017-11-06 08:07:30 -08002184 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
Selvin Xavierf218d672017-06-29 12:28:15 -07002185 /* Add qp to flush list of the CQ */
2186 bnxt_qplib_lock_buddy_cq(qp, cq);
2187 __bnxt_qplib_add_flush_qp(qp);
2188 bnxt_qplib_unlock_buddy_cq(qp, cq);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002189 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002190done:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002191 return rc;
2192}
2193
Selvin Xavier499e4562017-06-29 12:28:18 -07002194bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2195{
2196 struct cq_base *hw_cqe, **hw_cqe_ptr;
2197 unsigned long flags;
2198 u32 sw_cons, raw_cons;
2199 bool rc = true;
2200
2201 spin_lock_irqsave(&cq->hwq.lock, flags);
2202 raw_cons = cq->hwq.cons;
2203 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2204 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2205 hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2206
2207 /* Check for Valid bit. If the CQE is valid, return false */
2208 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2209 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2210 return rc;
2211}
2212
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002213static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2214 struct cq_res_raweth_qp1 *hwcqe,
2215 struct bnxt_qplib_cqe **pcqe,
2216 int *budget)
2217{
2218 struct bnxt_qplib_qp *qp;
2219 struct bnxt_qplib_q *rq;
2220 struct bnxt_qplib_cqe *cqe;
2221 u32 wr_id_idx;
2222 int rc = 0;
2223
2224 qp = (struct bnxt_qplib_qp *)((unsigned long)
2225 le64_to_cpu(hwcqe->qp_handle));
2226 if (!qp) {
2227 dev_err(&cq->hwq.pdev->dev,
2228 "QPLIB: process_cq Raw/QP1 qp is NULL");
2229 return -EINVAL;
2230 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002231 if (qp->rq.flushed) {
2232 dev_dbg(&cq->hwq.pdev->dev,
2233 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2234 goto done;
2235 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002236 cqe = *pcqe;
2237 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2238 cqe->flags = le16_to_cpu(hwcqe->flags);
2239 cqe->qp_handle = (u64)(unsigned long)qp;
2240
2241 wr_id_idx =
2242 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2243 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2244 cqe->src_qp = qp->id;
2245 if (qp->id == 1 && !cqe->length) {
2246 /* Add workaround for the length misdetection */
2247 cqe->length = 296;
2248 } else {
2249 cqe->length = le16_to_cpu(hwcqe->length);
2250 }
2251 cqe->pkey_index = qp->pkey_index;
2252 memcpy(cqe->smac, qp->smac, 6);
2253
2254 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2255 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
Devesh Sharma84511452017-11-08 02:48:45 -05002256 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002257
2258 rq = &qp->rq;
2259 if (wr_id_idx > rq->hwq.max_elements) {
2260 dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
2261 dev_err(&cq->hwq.pdev->dev, "QPLIB: ix 0x%x exceeded RQ max 0x%x",
2262 wr_id_idx, rq->hwq.max_elements);
2263 return -EINVAL;
2264 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002265
2266 cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2267 cqe++;
2268 (*budget)--;
2269 rq->hwq.cons++;
2270 *pcqe = cqe;
2271
2272 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
Selvin Xavier237379f2017-11-06 08:07:30 -08002273 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
Selvin Xavierf218d672017-06-29 12:28:15 -07002274 /* Add qp to flush list of the CQ */
2275 bnxt_qplib_lock_buddy_cq(qp, cq);
2276 __bnxt_qplib_add_flush_qp(qp);
2277 bnxt_qplib_unlock_buddy_cq(qp, cq);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002278 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002279
2280done:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002281 return rc;
2282}
2283
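/* A terminal CQE indicates the QP has been moved to the error state.
 * Complete any aggregated successful SQ work up to the reported sq_cons_idx,
 * then place the QP on the CQ's flush list so that poll_cq flushes the rest
 * of the SQ and all posted RQEs.
 */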
2284static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2285 struct cq_terminal *hwcqe,
2286 struct bnxt_qplib_cqe **pcqe,
2287 int *budget)
2288{
2289 struct bnxt_qplib_qp *qp;
2290 struct bnxt_qplib_q *sq, *rq;
2291 struct bnxt_qplib_cqe *cqe;
2292 u32 sw_cons = 0, cqe_cons;
2293 int rc = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002294
2295 /* Check the Status */
2296 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2297 dev_warn(&cq->hwq.pdev->dev,
2298 "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
2299 hwcqe->status);
2300
2301 qp = (struct bnxt_qplib_qp *)((unsigned long)
2302 le64_to_cpu(hwcqe->qp_handle));
2303 if (!qp) {
2304 dev_err(&cq->hwq.pdev->dev,
2305 "QPLIB: FP: CQ Process terminal qp is NULL");
2306 return -EINVAL;
2307 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002308
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002309 /* Must block new posting of SQ and RQ */
2310 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2311
2312 sq = &qp->sq;
2313 rq = &qp->rq;
2314
2315 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2316 if (cqe_cons == 0xFFFF)
2317 goto do_rq;
2318
2319 if (cqe_cons > sq->hwq.max_elements) {
2320 dev_err(&cq->hwq.pdev->dev,
2321 "QPLIB: FP: CQ Process terminal reported ");
2322 dev_err(&cq->hwq.pdev->dev,
2323 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
2324 cqe_cons, sq->hwq.max_elements);
2325 goto do_rq;
2326 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002327
2328 if (qp->sq.flushed) {
2329 dev_dbg(&cq->hwq.pdev->dev,
2330 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2331 goto sq_done;
2332 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002333
2334 /* Terminal CQE can also include aggregated successful CQEs prior.
2335 * So we must complete all CQEs from the current sq's cons to the
2336 * cq_cons with status OK
2337 */
2338 cqe = *pcqe;
2339 while (*budget) {
2340 sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2341 if (sw_cons == cqe_cons)
2342 break;
2343 if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2344 memset(cqe, 0, sizeof(*cqe));
2345 cqe->status = CQ_REQ_STATUS_OK;
2346 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2347 cqe->qp_handle = (u64)(unsigned long)qp;
2348 cqe->src_qp = qp->id;
2349 cqe->wr_id = sq->swq[sw_cons].wr_id;
2350 cqe->type = sq->swq[sw_cons].type;
2351 cqe++;
2352 (*budget)--;
2353 }
2354 sq->hwq.cons++;
2355 }
2356 *pcqe = cqe;
2357 if (!(*budget) && sw_cons != cqe_cons) {
2358 /* Out of budget */
2359 rc = -EAGAIN;
2360 goto sq_done;
2361 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002362sq_done:
2363 if (rc)
2364 return rc;
2365do_rq:
2366 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2367 if (cqe_cons == 0xFFFF) {
2368 goto done;
2369 } else if (cqe_cons > rq->hwq.max_elements) {
2370 dev_err(&cq->hwq.pdev->dev,
2371 "QPLIB: FP: CQ Processed terminal ");
2372 dev_err(&cq->hwq.pdev->dev,
2373 "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
2374 cqe_cons, rq->hwq.max_elements);
2375 goto done;
2376 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002377
2378 if (qp->rq.flushed) {
2379 dev_dbg(&cq->hwq.pdev->dev,
2380 "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
2381 rc = 0;
2382 goto done;
2383 }
2384
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002385 /* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
 2386	 * from the current rq->cons to the rq->prod, regardless of the
 2387	 * rq->cons value the terminal CQE indicates
2388 */
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002389
Selvin Xavierf218d672017-06-29 12:28:15 -07002390 /* Add qp to flush list of the CQ */
2391 bnxt_qplib_lock_buddy_cq(qp, cq);
2392 __bnxt_qplib_add_flush_qp(qp);
2393 bnxt_qplib_unlock_buddy_cq(qp, cq);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002394done:
2395 return rc;
2396}
2397
2398static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2399 struct cq_cutoff *hwcqe)
2400{
2401 /* Check the Status */
2402 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2403 dev_err(&cq->hwq.pdev->dev,
2404 "QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
2405 hwcqe->status);
2406 return -EINVAL;
2407 }
2408 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2409 wake_up_interruptible(&cq->waitq);
2410
2411 return 0;
2412}
2413
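/* Generate flush-error completions for every QP currently on this CQ's SQ and
 * RQ flush lists, bounded by num_cqes.  Returns the number of CQEs written
 * into the caller's array.
 */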
Selvin Xavierf218d672017-06-29 12:28:15 -07002414int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2415 struct bnxt_qplib_cqe *cqe,
2416 int num_cqes)
2417{
2418 struct bnxt_qplib_qp *qp = NULL;
2419 u32 budget = num_cqes;
2420 unsigned long flags;
2421
2422 spin_lock_irqsave(&cq->hwq.lock, flags);
2423 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2424 dev_dbg(&cq->hwq.pdev->dev,
2425 "QPLIB: FP: Flushing SQ QP= %p",
2426 qp);
2427 __flush_sq(&qp->sq, qp, &cqe, &budget);
2428 }
2429
2430 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2431 dev_dbg(&cq->hwq.pdev->dev,
2432 "QPLIB: FP: Flushing RQ QP= %p",
2433 qp);
2434 __flush_rq(&qp->rq, qp, &cqe, &budget);
2435 }
2436 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2437
2438 return num_cqes - budget;
2439}
2440
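/* Poll up to num_cqes completions from the CQ.  Each valid hardware CQE is
 * dispatched to its type-specific handler; polling stops early on -EAGAIN
 * (budget exhausted or WA 9060 in progress).  The consumer-index doorbell is
 * rung only if at least one CQE was consumed.
 */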
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002441int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
Eddie Wai9152e0b2017-06-14 03:26:23 -07002442 int num_cqes, struct bnxt_qplib_qp **lib_qp)
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002443{
2444 struct cq_base *hw_cqe, **hw_cqe_ptr;
2445 unsigned long flags;
2446 u32 sw_cons, raw_cons;
2447 int budget, rc = 0;
2448
2449 spin_lock_irqsave(&cq->hwq.lock, flags);
2450 raw_cons = cq->hwq.cons;
2451 budget = num_cqes;
2452
2453 while (budget) {
2454 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2455 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2456 hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2457
2458 /* Check for Valid bit */
2459 if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2460 break;
2461
Somnath Kotur9b401832017-11-06 08:07:29 -08002462 /*
2463 * The valid test of the entry must be done first before
2464 * reading any further.
2465 */
2466 dma_rmb();
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002467		/* From the device's respective CQE format to qplib_wc */
2468 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2469 case CQ_BASE_CQE_TYPE_REQ:
2470 rc = bnxt_qplib_cq_process_req(cq,
2471 (struct cq_req *)hw_cqe,
Eddie Wai9152e0b2017-06-14 03:26:23 -07002472 &cqe, &budget,
2473 sw_cons, lib_qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002474 break;
2475 case CQ_BASE_CQE_TYPE_RES_RC:
2476 rc = bnxt_qplib_cq_process_res_rc(cq,
2477 (struct cq_res_rc *)
2478 hw_cqe, &cqe,
2479 &budget);
2480 break;
2481 case CQ_BASE_CQE_TYPE_RES_UD:
2482 rc = bnxt_qplib_cq_process_res_ud
2483 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
2484 &budget);
2485 break;
2486 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2487 rc = bnxt_qplib_cq_process_res_raweth_qp1
2488 (cq, (struct cq_res_raweth_qp1 *)
2489 hw_cqe, &cqe, &budget);
2490 break;
2491 case CQ_BASE_CQE_TYPE_TERMINAL:
2492 rc = bnxt_qplib_cq_process_terminal
2493 (cq, (struct cq_terminal *)hw_cqe,
2494 &cqe, &budget);
2495 break;
2496 case CQ_BASE_CQE_TYPE_CUT_OFF:
2497 bnxt_qplib_cq_process_cutoff
2498 (cq, (struct cq_cutoff *)hw_cqe);
2499 /* Done processing this CQ */
2500 goto exit;
2501 default:
2502 dev_err(&cq->hwq.pdev->dev,
2503 "QPLIB: process_cq unknown type 0x%lx",
2504 hw_cqe->cqe_type_toggle &
2505 CQ_BASE_CQE_TYPE_MASK);
2506 rc = -EINVAL;
2507 break;
2508 }
2509 if (rc < 0) {
2510 if (rc == -EAGAIN)
2511 break;
2512 /* Error while processing the CQE, just skip to the
2513 * next one
2514 */
2515 dev_err(&cq->hwq.pdev->dev,
2516 "QPLIB: process_cqe error rc = 0x%x", rc);
2517 }
2518 raw_cons++;
2519 }
2520 if (cq->hwq.cons != raw_cons) {
2521 cq->hwq.cons = raw_cons;
2522 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2523 }
2524exit:
2525 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2526 return num_cqes - budget;
2527}
2528
2529void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2530{
2531 unsigned long flags;
2532
2533 spin_lock_irqsave(&cq->hwq.lock, flags);
2534 if (arm_type)
2535 bnxt_qplib_arm_cq(cq, arm_type);
Selvin Xavierf218d672017-06-29 12:28:15 -07002536	/* Use cq->arm_state to track whether the CQ handler should be invoked */
2537 atomic_set(&cq->arm_state, 1);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002538 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2539}
Selvin Xavierc88a7852017-11-06 08:07:31 -08002540
2541void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
2542{
2543 flush_workqueue(qp->scq->nq->cqn_wq);
2544 if (qp->scq != qp->rcq)
2545 flush_workqueue(qp->rcq->nq->cqn_wq);
2546}