/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"

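/*
 * This file implements the control path of the driver: the device queues
 * (EQ/CQ/MQ) and the mailbox command interface used to configure the
 * adapter. Commands are posted as MQEs on the mailbox queue (MQ) and
 * complete as MCQEs on its companion CQ; asynchronous events arrive on
 * the same CQ and are dispatched to the IB consumers.
 */
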
enum mbx_status {
	OCRDMA_MBX_STATUS_FAILED = 1,
	OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3,
	OCRDMA_MBX_STATUS_OOR = 100,
	OCRDMA_MBX_STATUS_INVALID_PD = 101,
	OCRDMA_MBX_STATUS_PD_INUSE = 102,
	OCRDMA_MBX_STATUS_INVALID_CQ = 103,
	OCRDMA_MBX_STATUS_INVALID_QP = 104,
	OCRDMA_MBX_STATUS_INVALID_LKEY = 105,
	OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106,
	OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107,
	OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,
	OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,
	OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110,
	OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,
	OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112,
	OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113,
	OCRDMA_MBX_STATUS_MW_BOUND = 114,
	OCRDMA_MBX_STATUS_INVALID_VA = 115,
	OCRDMA_MBX_STATUS_INVALID_LENGTH = 116,
	OCRDMA_MBX_STATUS_INVALID_FBO = 117,
	OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118,
	OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119,
	OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,
	OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,
	OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129,
	OCRDMA_MBX_STATUS_SRQ_ERROR = 133,
	OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134,
	OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135,
	OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136,
	OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,
	OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,
	OCRDMA_MBX_STATUS_QP_BOUND = 130,
	OCRDMA_MBX_STATUS_INVALID_CHANGE = 139,
	OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140,
	OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,
	OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142,
	OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143,
	OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144
};

enum additional_status {
	OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
};

enum cqe_status {
	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1,
	OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2,
	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3,
	OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4,
	OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5
};

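/* The EQ, MQ and MQ-CQ below are simple power-of-two rings in coherent
 * DMA memory. The "get" helpers return the entry at the current index,
 * and the "inc" helpers advance head/tail, masking with (len - 1) so
 * the index wraps around.
 */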
static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
{
	return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
}

static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
{
	eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
}

static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
{
	struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
	    (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));

	if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
		return NULL;
	return cqe;
}

static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
{
	dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
}

static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
{
	return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
}

static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
{
	dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
}

static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
{
	return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
}

enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
{
	switch (qps) {
	case OCRDMA_QPS_RST:
		return IB_QPS_RESET;
	case OCRDMA_QPS_INIT:
		return IB_QPS_INIT;
	case OCRDMA_QPS_RTR:
		return IB_QPS_RTR;
	case OCRDMA_QPS_RTS:
		return IB_QPS_RTS;
	case OCRDMA_QPS_SQD:
	case OCRDMA_QPS_SQ_DRAINING:
		return IB_QPS_SQD;
	case OCRDMA_QPS_SQE:
		return IB_QPS_SQE;
	case OCRDMA_QPS_ERR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}

static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
{
	switch (qps) {
	case IB_QPS_RESET:
		return OCRDMA_QPS_RST;
	case IB_QPS_INIT:
		return OCRDMA_QPS_INIT;
	case IB_QPS_RTR:
		return OCRDMA_QPS_RTR;
	case IB_QPS_RTS:
		return OCRDMA_QPS_RTS;
	case IB_QPS_SQD:
		return OCRDMA_QPS_SQD;
	case IB_QPS_SQE:
		return OCRDMA_QPS_SQE;
	case IB_QPS_ERR:
		return OCRDMA_QPS_ERR;
	}
	return OCRDMA_QPS_ERR;
}

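/* Translate a mailbox completion status (and, for generic failures, the
 * additional status) reported by the firmware into a negative errno.
 */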
static int ocrdma_get_mbx_errno(u32 status)
{
	int err_num;
	u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
					OCRDMA_MBX_RSP_STATUS_SHIFT;
	u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
					OCRDMA_MBX_RSP_ASTATUS_SHIFT;

	switch (mbox_status) {
	case OCRDMA_MBX_STATUS_OOR:
	case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
		err_num = -EAGAIN;
		break;

	case OCRDMA_MBX_STATUS_INVALID_PD:
	case OCRDMA_MBX_STATUS_INVALID_CQ:
	case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
	case OCRDMA_MBX_STATUS_INVALID_QP:
	case OCRDMA_MBX_STATUS_INVALID_CHANGE:
	case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
	case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
	case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
	case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
	case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
	case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
	case OCRDMA_MBX_STATUS_INVALID_LKEY:
	case OCRDMA_MBX_STATUS_INVALID_VA:
	case OCRDMA_MBX_STATUS_INVALID_LENGTH:
	case OCRDMA_MBX_STATUS_INVALID_FBO:
	case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
	case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
	case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
	case OCRDMA_MBX_STATUS_SRQ_ERROR:
	case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
		err_num = -EINVAL;
		break;

	case OCRDMA_MBX_STATUS_PD_INUSE:
	case OCRDMA_MBX_STATUS_QP_BOUND:
	case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
	case OCRDMA_MBX_STATUS_MW_BOUND:
		err_num = -EBUSY;
		break;

	case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
	case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
	case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
	case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
	case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
		err_num = -ENOBUFS;
		break;

	case OCRDMA_MBX_STATUS_FAILED:
		switch (add_status) {
		case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
			err_num = -EAGAIN;
			break;
		default:
			err_num = -EFAULT;
		}
		break;
	default:
		err_num = -EFAULT;
	}
	return err_num;
}

char *port_speed_string(struct ocrdma_dev *dev)
{
	char *str = "";
	u16 speeds_supported;

	speeds_supported = dev->phy.fixed_speeds_supported |
				dev->phy.auto_speeds_supported;
	if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
		str = "40Gbps ";
	else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
		str = "10Gbps ";
	else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
		str = "1Gbps ";

	return str;
}

static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
{
	int err_num = -EINVAL;

	switch (cqe_status) {
	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
		err_num = -EPERM;
		break;
	case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
		err_num = -EINVAL;
		break;
	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
	case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
		err_num = -EINVAL;
		break;
	case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
	default:
		err_num = -EINVAL;
		break;
	}
	return err_num;
}

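/* Doorbell writes: a single 32-bit register write tells the controller
 * which queue is being serviced, how many entries were consumed and
 * whether to rearm the interrupt. Queue ids wider than the base mask
 * are carried in an extension field.
 */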
void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
		       bool solicited, u16 cqe_popped)
{
	u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;

	val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
		OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (armed)
		val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
	if (solicited)
		val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
	val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
}

static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
{
	u32 val = 0;

	val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
	val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
}

static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
			      bool arm, bool clear_int, u16 num_eqe)
{
	u32 val = 0;

	val |= eq_id & OCRDMA_EQ_ID_MASK;
	val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);
	if (arm)
		val |= (1 << OCRDMA_REARM_SHIFT);
	if (clear_int)
		val |= (1 << OCRDMA_EQ_CLR_SHIFT);
	val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
	val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
}

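/* Mailbox commands are framed as an MQE header followed either by an
 * embedded request (payload carried inline in the MQE) or by SGEs that
 * point at a separate DMA buffer. ocrdma_init_mch() fills the common
 * sub-header shared by both forms.
 */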
static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
			    u8 opcode, u8 subsys, u32 cmd_len)
{
	cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
	cmd_hdr->timeout = 20; /* seconds */
	cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
}

static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
{
	struct ocrdma_mqe *mqe;

	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
	if (!mqe)
		return NULL;
	mqe->hdr.spcl_sge_cnt_emb |=
		(OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
		OCRDMA_MQE_HDR_EMB_MASK;
	mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);

	ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
			mqe->hdr.pyld_len);
	return mqe;
}

static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
{
	dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
}

static int ocrdma_alloc_q(struct ocrdma_dev *dev,
			  struct ocrdma_queue_info *q, u16 len, u16 entry_size)
{
	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	q->size = len * entry_size;
	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
				   &q->dma, GFP_KERNEL);
	if (!q->va)
		return -ENOMEM;
	memset(q->va, 0, q->size);
	return 0;
}

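/* Split a physically contiguous queue into hw_page_size chunks and
 * record each chunk's bus address as a {lo, hi} PA entry so the
 * firmware can map the ring.
 */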
static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
				 dma_addr_t host_pa, int hw_page_size)
{
	int i;

	for (i = 0; i < cnt; i++) {
		q_pa[i].lo = (u32) (host_pa & 0xffffffff);
		q_pa[i].hi = (u32) upper_32_bits(host_pa);
		host_pa += hw_page_size;
	}
}

static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
			       struct ocrdma_queue_info *q, int queue_type)
{
	u8 opcode = 0;
	int status;
	struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;

	switch (queue_type) {
	case QTYPE_MCCQ:
		opcode = OCRDMA_CMD_DELETE_MQ;
		break;
	case QTYPE_CQ:
		opcode = OCRDMA_CMD_DELETE_CQ;
		break;
	case QTYPE_EQ:
		opcode = OCRDMA_CMD_DELETE_EQ;
		break;
	default:
		BUG();
	}
	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cmd->id = q->id;

	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status)
		q->created = false;
	return status;
}

static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int status;
	struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
	struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
			sizeof(*cmd));

	cmd->req.rsvd_version = 2;
	cmd->num_pages = 4;
	cmd->valid = OCRDMA_CREATE_EQ_VALID;
	cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;

	ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
			     PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
				 NULL);
	if (!status) {
		eq->q.id = rsp->vector_eqid & 0xffff;
		eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
		eq->q.created = true;
	}
	return status;
}

static int ocrdma_create_eq(struct ocrdma_dev *dev,
			    struct ocrdma_eq *eq, u16 q_len)
{
	int status;

	status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
				sizeof(struct ocrdma_eqe));
	if (status)
		return status;

	status = ocrdma_mbx_create_eq(dev, eq);
	if (status)
		goto mbx_err;
	eq->dev = dev;
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);

	return 0;
mbx_err:
	ocrdma_free_q(dev, &eq->q);
	return status;
}

int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int irq;

	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
		irq = dev->nic_info.pdev->irq;
	else
		irq = dev->nic_info.msix.vector_list[eq->vector];
	return irq;
}

static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	if (eq->q.created) {
		ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
		ocrdma_free_q(dev, &eq->q);
	}
}

static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int irq;

	/* Disarm the EQ so that no interrupts are generated while it is
	 * being freed and the EQ delete command is in progress.
	 */
	ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);

	irq = ocrdma_get_irq(dev, eq);
	free_irq(irq, eq);
	_ocrdma_destroy_eq(dev, eq);
}

static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
{
	int i;

	for (i = 0; i < dev->eq_cnt; i++)
		ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
}

static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
				   struct ocrdma_queue_info *cq,
				   struct ocrdma_queue_info *eq)
{
	struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
	struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
	int status;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
	cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
		OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
	cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);

	cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
	cmd->eqn = eq->id;
	cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);

	ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
			     cq->dma, PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status) {
		cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
		cq->created = true;
	}
	return status;
}

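/* The controller encodes ring lengths as log2(len) + 1; for the
 * power-of-two lengths used here fls() yields exactly that value. An
 * encoding of 16 wraps to 0, presumably because the hardware field is
 * only four bits wide.
 */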
static u32 ocrdma_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
				struct ocrdma_queue_info *mq,
				struct ocrdma_queue_info *cq)
{
	int num_pages, status;
	struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
	struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
	struct ocrdma_pa *pa;

	memset(cmd, 0, sizeof(*cmd));
	num_pages = PAGES_4K_SPANNED(mq->va, mq->size);

	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cmd->req.rsvd_version = 1;
	cmd->cqid_pages = num_pages;
	cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
	cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;

	cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
	cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);

	cmd->async_cqid_ringsize = cq->id;
	cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
				     OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
	cmd->valid = OCRDMA_CREATE_MQ_VALID;
	pa = &cmd->pa[0];

	ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status) {
		mq->id = rsp->id;
		mq->created = true;
	}
	return status;
}

static int ocrdma_create_mq(struct ocrdma_dev *dev)
{
	int status;

	/* Alloc completion queue for Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
				sizeof(struct ocrdma_mcqe));
	if (status)
		goto alloc_err;

	dev->eq_tbl[0].cq_cnt++;
	status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
	if (status)
		goto mbx_cq_free;

	memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
	init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
	mutex_init(&dev->mqe_ctx.lock);

	/* Alloc Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
				sizeof(struct ocrdma_mqe));
	if (status)
		goto mbx_cq_destroy;
	status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
	if (status)
		goto mbx_q_free;
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
	return 0;

mbx_q_free:
	ocrdma_free_q(dev, &dev->mq.sq);
mbx_cq_destroy:
	ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
mbx_cq_free:
	ocrdma_free_q(dev, &dev->mq.cq);
alloc_err:
	return status;
}

static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
{
	struct ocrdma_queue_info *mbxq, *cq;

	/* mqe_ctx lock synchronizes with any other pending cmds. */
	mutex_lock(&dev->mqe_ctx.lock);
	mbxq = &dev->mq.sq;
	if (mbxq->created) {
		ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
		ocrdma_free_q(dev, mbxq);
	}
	mutex_unlock(&dev->mqe_ctx.lock);

	cq = &dev->mq.cq;
	if (cq->created) {
		ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
		ocrdma_free_q(dev, cq);
	}
}

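/* Async-event (AE) MCQEs carry an event code (RDMA vs. GRP5) and an
 * event type. For RDMA events the qpid/cqid fields are validated
 * against the resource tables before the matching ib_event is built
 * and dispatched to whichever consumer registered an event handler.
 */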
static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
				       struct ocrdma_qp *qp)
{
	enum ib_qp_state new_ib_qps = IB_QPS_ERR;
	enum ib_qp_state old_ib_qps;

	if (qp == NULL)
		BUG();
	ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
}

static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
				    struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_cq *cq = NULL;
	struct ib_event ib_evt;
	int cq_event = 0;
	int qp_event = 1;
	int srq_event = 0;
	int dev_event = 0;
	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
	    OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
	u16 qpid = cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK;
	u16 cqid = cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK;

	/*
	 * Some FW versions return wrong qp or cq ids in CQEs.
	 * Check that the IDs are valid before using them.
	 */
	if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) {
		if (qpid < dev->attr.max_qp)
			qp = dev->qp_tbl[qpid];
		if (qp == NULL) {
			pr_err("ocrdma%d: Async event - qpid %u is not valid\n",
			       dev->id, qpid);
			return;
		}
	}

	if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) {
		if (cqid < dev->attr.max_cq)
			cq = dev->cq_tbl[cqid];
		if (cq == NULL) {
			pr_err("ocrdma%d: Async event - cqid %u is not valid\n",
			       dev->id, cqid);
			return;
		}
	}

	memset(&ib_evt, 0, sizeof(ib_evt));

	ib_evt.device = &dev->ibdev;

	switch (type) {
	case OCRDMA_CQ_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		cq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_CQ_OVERRUN_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		cq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_CQ_QPCAT_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_FATAL;
		ocrdma_process_qpcat_error(dev, qp);
		break;
	case OCRDMA_QP_ACCESS_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case OCRDMA_QP_COMM_EST_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_COMM_EST;
		break;
	case OCRDMA_SQ_DRAINED_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_SQ_DRAINED;
		break;
	case OCRDMA_DEVICE_FATAL_EVENT:
		ib_evt.element.port_num = 1;
		ib_evt.event = IB_EVENT_DEVICE_FATAL;
		qp_event = 0;
		dev_event = 1;
		break;
	case OCRDMA_SRQCAT_ERROR:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_ERR;
		srq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_SRQ_LIMIT_EVENT:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
		srq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_QP_LAST_WQE_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
		break;
	default:
		cq_event = 0;
		qp_event = 0;
		srq_event = 0;
		dev_event = 0;
		pr_err("%s() unknown type=0x%x\n", __func__, type);
		break;
	}

	if (type < OCRDMA_MAX_ASYNC_ERRORS)
		atomic_inc(&dev->async_err_stats[type]);

	if (qp_event) {
		if (qp->ibqp.event_handler)
			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
	} else if (cq_event) {
		if (cq->ibcq.event_handler)
			cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
	} else if (srq_event) {
		if (qp->srq->ibsrq.event_handler)
			qp->srq->ibsrq.event_handler(&ib_evt,
						     qp->srq->ibsrq.srq_context);
	} else if (dev_event) {
		pr_err("%s: Fatal event received\n", dev->ibdev.name);
		ib_dispatch_event(&ib_evt);
	}
}

static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
				     struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_ae_pvid_mcqe *evt;
	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
	    OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

	switch (type) {
	case OCRDMA_ASYNC_EVENT_PVID_STATE:
		evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
		if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
			OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
			dev->pvid = ((evt->tag_enabled &
					OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
					OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
		break;

	case OCRDMA_ASYNC_EVENT_COS_VALUE:
		atomic_set(&dev->update_sl, 1);
		break;
	default:
		/* Not an event of interest; ignore it. */
		break;
	}
}

static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
	/* async CQE processing */
	struct ocrdma_ae_mcqe *cqe = ae_cqe;
	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
			OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;

	if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
		ocrdma_dispatch_ibevent(dev, cqe);
	else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
		ocrdma_process_grp5_aync(dev, cqe);
	else
		pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
		       dev->id, evt_code);
}

static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
{
	if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
		dev->mqe_ctx.cqe_status = (cqe->status &
		     OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
		dev->mqe_ctx.ext_status =
		    (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
		    >> OCRDMA_MCQE_ESTATUS_SHIFT;
		dev->mqe_ctx.cmd_done = true;
		wake_up(&dev->mqe_ctx.cmd_wait);
	} else {
		pr_err("%s() cqe for invalid tag 0x%x, expected 0x%x\n",
		       __func__, cqe->tag_lo, dev->mqe_ctx.tag);
	}
}

static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	u16 cqe_popped = 0;
	struct ocrdma_mcqe *cqe;

	while (1) {
		cqe = ocrdma_get_mcqe(dev);
		if (cqe == NULL)
			break;
		ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
		cqe_popped += 1;
		if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
			ocrdma_process_acqe(dev, cqe);
		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
			ocrdma_process_mcqe(dev, cqe);
		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
		ocrdma_mcq_inc_tail(dev);
	}
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
	return 0;
}

static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
				struct ocrdma_cq *cq, bool sq)
{
	struct ocrdma_qp *qp;
	struct list_head *cur;
	struct ocrdma_cq *bcq = NULL;
	struct list_head *head = sq ? (&cq->sq_head) : (&cq->rq_head);

	list_for_each(cur, head) {
		if (sq)
			qp = list_entry(cur, struct ocrdma_qp, sq_entry);
		else
			qp = list_entry(cur, struct ocrdma_qp, rq_entry);

		if (qp->srq)
			continue;
		/* if wq and rq share the same cq, then the comp_handler
		 * is already invoked.
		 */
		if (qp->sq_cq == qp->rq_cq)
			continue;
		/* if the completion came on the sq, the rq's cq is the buddy cq.
		 * if the completion came on the rq, the sq's cq is the buddy cq.
		 */
		if (qp->sq_cq == cq)
			bcq = qp->rq_cq;
		else
			bcq = qp->sq_cq;
		return bcq;
	}
	return NULL;
}

static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
				       struct ocrdma_cq *cq)
{
	unsigned long flags;
	struct ocrdma_cq *bcq = NULL;

	/* Go through the list of QPs in error state which are using this CQ
	 * and invoke their callback handlers to trigger CQE processing for
	 * error/flushed CQEs. It is rare to find more than a few entries in
	 * this list, as most consumers stop after getting an error CQE.
	 * The list is traversed only until a matching buddy CQ is found
	 * for a QP.
	 */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	/* Check if a buddy CQ is present.
	 * true  - check for the SQ CQ
	 * false - check for the RQ CQ
	 */
	bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
	if (bcq == NULL)
		bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);

	/* if there is a valid buddy cq, look for its completion handler */
	if (bcq && bcq->ibcq.comp_handler) {
		spin_lock_irqsave(&bcq->comp_handler_lock, flags);
		(*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
		spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
	}
}

static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
{
	unsigned long flags;
	struct ocrdma_cq *cq;

	if (cq_idx >= OCRDMA_MAX_CQ)
		BUG();

	cq = dev->cq_tbl[cq_idx];
	if (cq == NULL)
		return;

	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
	}
	ocrdma_qp_buddy_cq_handler(dev, cq);
}

static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	/* process the MQ-CQE. */
	if (cq_id == dev->mq.cq.id)
		ocrdma_mq_cq_handler(dev, cq_id);
	else
		ocrdma_qp_cq_handler(dev, cq_id);
}

static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
{
	struct ocrdma_eq *eq = handle;
	struct ocrdma_dev *dev = eq->dev;
	struct ocrdma_eqe eqe;
	struct ocrdma_eqe *ptr;
	u16 cq_id;
	u8 mcode;
	int budget = eq->cq_cnt;

	do {
		ptr = ocrdma_get_eqe(eq);
		eqe = *ptr;
		ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
		mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
				>> OCRDMA_EQE_MAJOR_CODE_SHIFT;
		if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
			pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
			       eq->q.id, eqe.id_valid);
		if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
			break;

		ptr->id_valid = 0;
		/* ring the eq doorbell as soon as the eqe is consumed. */
		ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
		/* check whether it is a CQE or not. */
		if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
			cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
			ocrdma_cq_handler(dev, cq_id);
		}
		ocrdma_eq_inc_tail(eq);

		/* There can be a stale EQE after the last bound CQ is
		 * destroyed. EQE valid and budget == 0 implies this.
		 */
		if (budget)
			budget--;

	} while (budget);

	eq->aic_obj.eq_intr_cnt++;
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
	return IRQ_HANDLED;
}

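/* Synchronous mailbox protocol: the caller posts one MQE at sq.head,
 * tagging it with the head index, rings the MQ doorbell and sleeps on
 * mqe_ctx.cmd_wait. The MCQE handler above matches the completion by
 * tag and wakes the waiter; mqe_ctx.lock serializes the single
 * outstanding command.
 */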
static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
{
	struct ocrdma_mqe *mqe;

	dev->mqe_ctx.tag = dev->mq.sq.head;
	dev->mqe_ctx.cmd_done = false;
	mqe = ocrdma_get_mqe(dev);
	cmd->hdr.tag_lo = dev->mq.sq.head;
	ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
	/* make sure descriptor is written before ringing doorbell */
	wmb();
	ocrdma_mq_inc_head(dev);
	ocrdma_ring_mq_db(dev);
}

static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
{
	long status;
	/* 30 sec timeout */
	status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
				    (dev->mqe_ctx.cmd_done != false),
				    msecs_to_jiffies(30000));
	if (status)
		return 0;

	dev->mqe_ctx.fw_error_state = true;
	pr_err("%s(%d) mailbox timeout: fw not responding\n",
	       __func__, dev->id);
	return -1;
}

/* issue a mailbox command on the MQ */
static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
	int status = 0;
	u16 cqe_status, ext_status;
	struct ocrdma_mqe *rsp_mqe;
	struct ocrdma_mbx_rsp *rsp = NULL;

	mutex_lock(&dev->mqe_ctx.lock);
	if (dev->mqe_ctx.fw_error_state)
		goto mbx_err;
	ocrdma_post_mqe(dev, mqe);
	status = ocrdma_wait_mqe_cmpl(dev);
	if (status)
		goto mbx_err;
	cqe_status = dev->mqe_ctx.cqe_status;
	ext_status = dev->mqe_ctx.ext_status;
	rsp_mqe = ocrdma_get_mqe_rsp(dev);
	ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
	if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
				OCRDMA_MQE_HDR_EMB_SHIFT)
		rsp = &mqe->u.rsp;

	if (cqe_status || ext_status) {
		pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
		       __func__, cqe_status, ext_status);
		if (rsp) {
			/* This is for embedded cmds. */
			pr_err("opcode=0x%x, subsystem=0x%x\n",
			       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
				OCRDMA_MBX_RSP_OPCODE_SHIFT,
			       (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
				OCRDMA_MBX_RSP_SUBSYS_SHIFT);
		}
		status = ocrdma_get_mbx_cqe_errno(cqe_status);
		goto mbx_err;
	}
	/* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
	if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
		status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
	mutex_unlock(&dev->mqe_ctx.lock);
	return status;
}

static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
				 void *payload_va)
{
	int status = 0;
	struct ocrdma_mbx_rsp *rsp = payload_va;

	if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
				OCRDMA_MQE_HDR_EMB_SHIFT)
		BUG();

	status = ocrdma_mbx_cmd(dev, mqe);
	if (!status)
		/* For non embedded, only CQE failures are handled in
		 * ocrdma_mbx_cmd. We need to check for RSP errors.
		 */
		if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
			status = ocrdma_get_mbx_errno(rsp->status);

	if (status)
		pr_err("opcode=0x%x, subsystem=0x%x\n",
		       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
			OCRDMA_MBX_RSP_OPCODE_SHIFT,
		       (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
			OCRDMA_MBX_RSP_SUBSYS_SHIFT);
	return status;
}

static void ocrdma_get_attr(struct ocrdma_dev *dev,
			    struct ocrdma_dev_attr *attr,
			    struct ocrdma_mbx_query_config *rsp)
{
	attr->max_pd =
	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
	attr->max_dpp_pds =
	    (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
	attr->max_qp =
	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
	attr->max_srq =
	    (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
	attr->max_send_sge = ((rsp->max_write_send_sge &
			       OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
	attr->max_recv_sge = (rsp->max_write_send_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
	attr->max_srq_sge = (rsp->max_srq_rqe_sge &
			     OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
	attr->max_rdma_sge = (rsp->max_write_send_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
	attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
	attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
				    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
	attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
			       OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
	attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
				    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
	    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
	attr->max_mw = rsp->max_mw;
	attr->max_mr = rsp->max_mr;
	attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
			    rsp->max_mr_size_lo;
	attr->max_fmr = 0;
	attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
	attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
	attr->max_cqe = rsp->max_cq_cqes_per_cq &
			OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
	attr->max_cq = (rsp->max_cq_cqes_per_cq &
			OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
			OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
	attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
			 OCRDMA_WQE_STRIDE;
	attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
			 OCRDMA_WQE_STRIDE;
	attr->max_inline_data =
	    attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
			      sizeof(struct ocrdma_sge));
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		attr->ird = 1;
		attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
		attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
	}
	dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
		OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
	dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
		OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
}

static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
				  struct ocrdma_fw_conf_rsp *conf)
{
	u32 fn_mode;

	fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
	if (fn_mode != OCRDMA_FN_MODE_RDMA)
		return -EINVAL;
	dev->base_eqid = conf->base_eqid;
	dev->max_eq = conf->max_eq;
	return 0;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_ver_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_VER,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_fw_ver_rsp *)cmd;
	memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
	memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
	       sizeof(rsp->running_ver));
	ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
mbx_err:
	kfree(cmd);
	return status;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_conf_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_CONFIG,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_fw_conf_rsp *)cmd;
	status = ocrdma_check_fw_config(dev, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

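/* RDMA statistics are fetched with a non-embedded command: the MQE
 * carries a single SGE pointing at stats_mem, and the firmware writes
 * its response into the same buffer. The previous snapshot is cached
 * first so that a failed refresh does not wipe the counters.
 */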
int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
{
	struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
	struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
	struct ocrdma_rdma_stats_resp *old_stats;
	int status;

	old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL);
	if (old_stats == NULL)
		return -ENOMEM;

	memset(mqe, 0, sizeof(*mqe));
	mqe->hdr.pyld_len = dev->stats_mem.size;
	mqe->hdr.spcl_sge_cnt_emb |=
			(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
				OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
	mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;

	/* Cache the old stats */
	memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
	memset(req, 0, dev->stats_mem.size);

	ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
			OCRDMA_CMD_GET_RDMA_STATS,
			OCRDMA_SUBSYS_ROCE,
			dev->stats_mem.size);
	if (reset)
		req->reset_stats = reset;

	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
	if (status)
		/* Copy from cache, if mbox fails */
		memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
	else
		ocrdma_le32_to_cpu(req, dev->stats_mem.size);

	kfree(old_stats);
	return status;
}

static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_dma_mem dma;
	struct ocrdma_mqe *mqe;
	struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
	struct mgmt_hba_attribs *hba_attribs;

	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
	if (!mqe)
		return status;

	dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
	dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
				    dma.size, &dma.pa, GFP_KERNEL);
	if (!dma.va)
		goto free_mqe;

	mqe->hdr.pyld_len = dma.size;
	mqe->hdr.spcl_sge_cnt_emb |=
			(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
			OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
	mqe->u.nonemb_req.sge[0].len = dma.size;

	memset(dma.va, 0, dma.size);
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
			OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
			OCRDMA_SUBSYS_COMMON,
			dma.size);

	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
	if (!status) {
		ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
		hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;

		dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
					OCRDMA_HBA_ATTRB_PTNUM_MASK)
					>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
		strncpy(dev->model_number,
			hba_attribs->controller_model_number, 31);
	}
	dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
free_mqe:
	kfree(mqe);
	return status;
}

static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mbx_query_config *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
	if (!cmd)
		return status;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_mbx_query_config *)cmd;
	ocrdma_get_attr(dev, &dev->attr, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
{
	int status = -ENOMEM;
	struct ocrdma_get_link_speed_rsp *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
				  sizeof(*cmd));
	if (!cmd)
		return status;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
	*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
			>> OCRDMA_PHY_PS_SHIFT;

mbx_err:
	kfree(cmd);
	return status;
}

static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_get_phy_info_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
	if (!cmd)
		return status;

	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
			sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
	dev->phy.phy_type =
			(rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
	dev->phy.interface_type =
			(rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
				>> OCRDMA_IF_TYPE_SHIFT;
	dev->phy.auto_speeds_supported =
			(rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
	dev->phy.fixed_speeds_supported =
			(rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
				>> OCRDMA_FSPEED_SUPP_SHIFT;
mbx_err:
	kfree(cmd);
	return status;
}

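/* A PD may be allocated with DPP (direct packet push) enabled; the
 * response then carries, besides the PD id, the DPP doorbell page
 * index that QPs created on this PD use for pushing WQEs directly.
 */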
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_alloc_pd *cmd;
	struct ocrdma_alloc_pd_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	if (pd->dpp_enabled)
		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
	pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
	if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
		pd->dpp_enabled = true;
		pd->dpp_page = rsp->dpp_page_pdid >>
				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
	} else {
		pd->dpp_enabled = false;
		pd->num_dpp_qp = 0;
	}
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_dealloc_pd *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = pd->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}

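/* PD resource manager: pre-allocate the whole PD range (DPP and normal)
 * from the firmware at init time and hand PDs out from local bitmaps,
 * so that PD allocation in the fast path does not need a mailbox
 * command.
 */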
static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	size_t pd_bitmap_size;
	struct ocrdma_alloc_pd_range *cmd;
	struct ocrdma_alloc_pd_range_rsp *rsp;

	/* Pre allocate the DPP PDs */
	if (dev->attr.max_dpp_pds) {
		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
					  sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;
		cmd->pd_count = dev->attr.max_dpp_pds;
		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
		status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
		rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;

		if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
		    rsp->pd_count) {
			dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
					OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
			dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
					OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
			dev->pd_mgr->max_dpp_pd = rsp->pd_count;
			pd_bitmap_size =
				BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
			dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
							     GFP_KERNEL);
		}
		kfree(cmd);
	}

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
	if (!status && rsp->pd_count) {
		dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
					OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
		dev->pd_mgr->max_normal_pd = rsp->pd_count;
		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
		dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
						      GFP_KERNEL);
	}
	kfree(cmd);

	if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
		/* Enable PD resource manager */
		dev->pd_mgr->pd_prealloc_valid = true;
		return 0;
	}
	return status;
}

static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
{
	struct ocrdma_dealloc_pd_range *cmd;

	/* return normal PDs to firmware */
	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
	if (!cmd)
		goto mbx_err;

	if (dev->pd_mgr->max_normal_pd) {
		cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
		cmd->pd_count = dev->pd_mgr->max_normal_pd;
		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	}

	if (dev->pd_mgr->max_dpp_pd) {
		kfree(cmd);
		/* return DPP PDs to firmware */
		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
					  sizeof(*cmd));
		if (!cmd)
			goto mbx_err;

		cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
		cmd->pd_count = dev->pd_mgr->max_dpp_pd;
		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	}
mbx_err:
	kfree(cmd);
}

1562void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
1563{
1564 int status;
1565
1566 dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
1567 GFP_KERNEL);
1568 if (!dev->pd_mgr) {
1569 pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
1570 return;
1571 }
1572 status = ocrdma_mbx_alloc_pd_range(dev);
1573 if (status) {
1574 pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
1575 __func__, dev->id);
1576 }
1577}
1578
1579static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
1580{
1581 ocrdma_mbx_dealloc_pd_range(dev);
1582 kfree(dev->pd_mgr->pd_norm_bitmap);
1583 kfree(dev->pd_mgr->pd_dpp_bitmap);
1584 kfree(dev->pd_mgr);
1585}
1586
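/* Round the requested entry count up to a power of two, pick the
 * smallest hardware queue page size that can hold it, and derive the
 * page count and final entry count reported to firmware.
 */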
Parav Panditfe2caef2012-03-21 04:09:06 +05301587static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
1588 int *num_pages, int *page_size)
1589{
1590 int i;
1591 int mem_size;
1592
1593 *num_entries = roundup_pow_of_two(*num_entries);
1594 mem_size = *num_entries * entry_size;
1595	/* find the lowest possible multiplier */
1596 for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1597 if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
1598 break;
1599 }
1600 if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
1601 return -EINVAL;
1602 mem_size = roundup(mem_size,
1603 ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
1604 *num_pages =
1605 mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1606 *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1607 *num_entries = mem_size / entry_size;
1608 return 0;
1609}
1610
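/* Create the address handle (AH) table: a single PBL page whose PBEs
 * point at a DMA-coherent AV table, registered with firmware through
 * the CREATE_AH_TBL mailbox command.
 */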
1611static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
1612{
Devesh Sharmafad51b72014-02-04 11:57:10 +05301613 int i;
Parav Panditfe2caef2012-03-21 04:09:06 +05301614 int status = 0;
1615 int max_ah;
1616 struct ocrdma_create_ah_tbl *cmd;
1617 struct ocrdma_create_ah_tbl_rsp *rsp;
1618 struct pci_dev *pdev = dev->nic_info.pdev;
1619 dma_addr_t pa;
1620 struct ocrdma_pbe *pbes;
1621
1622 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
1623 if (!cmd)
1624 return status;
1625
1626 max_ah = OCRDMA_MAX_AH;
1627 dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;
1628
1629 /* number of PBEs in PBL */
1630 cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
1631 OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
1632 OCRDMA_CREATE_AH_NUM_PAGES_MASK;
1633
1634 /* page size */
1635 for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1636 if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
1637 break;
1638 }
1639 cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
1640 OCRDMA_CREATE_AH_PAGE_SIZE_MASK;
1641
1642 /* ah_entry size */
1643 cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
1644 OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
1645 OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;
1646
1647 dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
1648 &dev->av_tbl.pbl.pa,
1649 GFP_KERNEL);
1650 if (dev->av_tbl.pbl.va == NULL)
1651 goto mem_err;
1652
1653 dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
1654 &pa, GFP_KERNEL);
1655 if (dev->av_tbl.va == NULL)
1656 goto mem_err_ah;
1657 dev->av_tbl.pa = pa;
1658 dev->av_tbl.num_ah = max_ah;
1659 memset(dev->av_tbl.va, 0, dev->av_tbl.size);
1660
1661 pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
1662 for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
Devesh Sharma8ac0c7c2014-07-02 11:36:05 +05301663 pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
1664 pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
Parav Panditfe2caef2012-03-21 04:09:06 +05301665 pa += PAGE_SIZE;
1666 }
1667 cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
1668 cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
1669 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1670 if (status)
1671 goto mbx_err;
1672 rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
1673 dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
1674 kfree(cmd);
1675 return 0;
1676
1677mbx_err:
1678 dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1679 dev->av_tbl.pa);
1680 dev->av_tbl.va = NULL;
1681mem_err_ah:
1682 dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1683 dev->av_tbl.pbl.pa);
1684 dev->av_tbl.pbl.va = NULL;
1685 dev->av_tbl.size = 0;
1686mem_err:
1687 kfree(cmd);
1688 return status;
1689}
1690
1691static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
1692{
1693 struct ocrdma_delete_ah_tbl *cmd;
1694 struct pci_dev *pdev = dev->nic_info.pdev;
1695
1696 if (dev->av_tbl.va == NULL)
1697 return;
1698
1699 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
1700 if (!cmd)
1701 return;
1702 cmd->ahid = dev->av_tbl.ahid;
1703
1704 ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1705 dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1706 dev->av_tbl.pa);
Devesh Sharmadaac9682014-06-10 19:32:18 +05301707 dev->av_tbl.va = NULL;
Parav Panditfe2caef2012-03-21 04:09:06 +05301708 dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1709 dev->av_tbl.pbl.pa);
1710 kfree(cmd);
1711}
1712
1713/* Multiple CQs share an EQ. This routine returns the least used
1714 * EQ to associate with a CQ, distributing the interrupt processing
1715 * and CPU load across EQs, their vectors and hence the CPUs.
1716 */
1717static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
1718{
1719 int i, selected_eq = 0, cq_cnt = 0;
1720 u16 eq_id;
1721
1722 mutex_lock(&dev->dev_lock);
Naresh Gottumukkalac88bd032013-08-26 15:27:41 +05301723 cq_cnt = dev->eq_tbl[0].cq_cnt;
1724 eq_id = dev->eq_tbl[0].q.id;
Parav Panditfe2caef2012-03-21 04:09:06 +05301725	/* find the EQ which has the least number of
1726	 * CQs associated with it.
1727	 */
1728 for (i = 0; i < dev->eq_cnt; i++) {
Naresh Gottumukkalac88bd032013-08-26 15:27:41 +05301729 if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
1730 cq_cnt = dev->eq_tbl[i].cq_cnt;
1731 eq_id = dev->eq_tbl[i].q.id;
Parav Panditfe2caef2012-03-21 04:09:06 +05301732 selected_eq = i;
1733 }
1734 }
Naresh Gottumukkalac88bd032013-08-26 15:27:41 +05301735 dev->eq_tbl[selected_eq].cq_cnt += 1;
Parav Panditfe2caef2012-03-21 04:09:06 +05301736 mutex_unlock(&dev->dev_lock);
1737 return eq_id;
1738}
1739
1740static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
1741{
1742 int i;
1743
1744 mutex_lock(&dev->dev_lock);
Devesh Sharmaea6176262014-02-04 11:56:54 +05301745 i = ocrdma_get_eq_table_index(dev, eq_id);
1746 if (i == -EINVAL)
1747 BUG();
1748 dev->eq_tbl[i].cq_cnt -= 1;
Parav Panditfe2caef2012-03-21 04:09:06 +05301749 mutex_unlock(&dev->dev_lock);
1750}
1751
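/* Create a completion queue. The CQE count is encoded for firmware as
 * 256/512/1024 or a ">1024" flag, and the CQ is bound to the least
 * loaded EQ.
 */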
1752int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
Naresh Gottumukkalacffce992013-08-26 15:27:44 +05301753 int entries, int dpp_cq, u16 pd_id)
Parav Panditfe2caef2012-03-21 04:09:06 +05301754{
1755	int status = -ENOMEM, max_hw_cqe;
1756 struct pci_dev *pdev = dev->nic_info.pdev;
1757 struct ocrdma_create_cq *cmd;
1758 struct ocrdma_create_cq_rsp *rsp;
1759 u32 hw_pages, cqe_size, page_size, cqe_count;
1760
Parav Panditfe2caef2012-03-21 04:09:06 +05301761 if (entries > dev->attr.max_cqe) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001762 pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
1763 __func__, dev->id, dev->attr.max_cqe, entries);
Parav Panditfe2caef2012-03-21 04:09:06 +05301764 return -EINVAL;
1765 }
Devesh Sharma21c33912014-02-04 11:56:56 +05301766 if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
Parav Panditfe2caef2012-03-21 04:09:06 +05301767 return -EINVAL;
1768
1769 if (dpp_cq) {
1770 cq->max_hw_cqe = 1;
1771 max_hw_cqe = 1;
1772 cqe_size = OCRDMA_DPP_CQE_SIZE;
1773 hw_pages = 1;
1774 } else {
1775 cq->max_hw_cqe = dev->attr.max_cqe;
1776 max_hw_cqe = dev->attr.max_cqe;
1777 cqe_size = sizeof(struct ocrdma_cqe);
1778 hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
1779 }
1780
1781 cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);
1782
1783 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
1784 if (!cmd)
1785 return -ENOMEM;
1786 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
1787 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1788 cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
1789 if (!cq->va) {
1790 status = -ENOMEM;
1791 goto mem_err;
1792 }
1793 memset(cq->va, 0, cq->len);
1794 page_size = cq->len / hw_pages;
1795 cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
1796 OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
1797 cmd->cmd.pgsz_pgcnt |= hw_pages;
1798 cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
1799
Parav Panditfe2caef2012-03-21 04:09:06 +05301800 cq->eqn = ocrdma_bind_eq(dev);
Naresh Gottumukkalacffce992013-08-26 15:27:44 +05301801 cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
Parav Panditfe2caef2012-03-21 04:09:06 +05301802 cqe_count = cq->len / cqe_size;
Devesh Sharmaea6176262014-02-04 11:56:54 +05301803 cq->cqe_cnt = cqe_count;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301804 if (cqe_count > 1024) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301805 /* Set cnt to 3 to indicate more than 1024 cq entries */
1806 cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301807 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301808 u8 count = 0;
1809 switch (cqe_count) {
1810 case 256:
1811 count = 0;
1812 break;
1813 case 512:
1814 count = 1;
1815 break;
1816 case 1024:
1817 count = 2;
1818 break;
1819 default:
1820 goto mbx_err;
1821 }
1822 cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
1823 }
1824	/* shared EQ between all the consumer CQs. */
1825 cmd->cmd.eqn = cq->eqn;
Devesh Sharma21c33912014-02-04 11:56:56 +05301826 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301827 if (dpp_cq)
1828 cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
1829 OCRDMA_CREATE_CQ_TYPE_SHIFT;
1830 cq->phase_change = false;
Devesh Sharma8ac0c7c2014-07-02 11:36:05 +05301831 cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
Parav Panditfe2caef2012-03-21 04:09:06 +05301832 } else {
Devesh Sharma8ac0c7c2014-07-02 11:36:05 +05301833 cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
Parav Panditfe2caef2012-03-21 04:09:06 +05301834 cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
1835 cq->phase_change = true;
1836 }
1837
Devesh Sharma8ac0c7c2014-07-02 11:36:05 +05301838 /* pd_id valid only for v3 */
1839 cmd->cmd.pdid_cqecnt |= (pd_id <<
1840 OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05301841 ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
1842 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1843 if (status)
1844 goto mbx_err;
1845
1846 rsp = (struct ocrdma_create_cq_rsp *)cmd;
1847 cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
1848 kfree(cmd);
1849 return 0;
1850mbx_err:
1851 ocrdma_unbind_eq(dev, cq->eqn);
Parav Panditfe2caef2012-03-21 04:09:06 +05301852 dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
1853mem_err:
1854 kfree(cmd);
1855 return status;
1856}
1857
1858int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
1859{
1860 int status = -ENOMEM;
1861 struct ocrdma_destroy_cq *cmd;
1862
1863 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
1864 if (!cmd)
1865 return status;
1866 ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
1867 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1868
1869 cmd->bypass_flush_qid |=
1870 (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
1871 OCRDMA_DESTROY_CQ_QID_MASK;
1872
Parav Panditfe2caef2012-03-21 04:09:06 +05301873 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Devesh Sharmaea6176262014-02-04 11:56:54 +05301874 ocrdma_unbind_eq(dev, cq->eqn);
Parav Panditfe2caef2012-03-21 04:09:06 +05301875 dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
Parav Panditfe2caef2012-03-21 04:09:06 +05301876 kfree(cmd);
1877 return status;
1878}
1879
1880int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1881 u32 pdid, int addr_check)
1882{
1883 int status = -ENOMEM;
1884 struct ocrdma_alloc_lkey *cmd;
1885 struct ocrdma_alloc_lkey_rsp *rsp;
1886
1887 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
1888 if (!cmd)
1889 return status;
1890 cmd->pdid = pdid;
1891 cmd->pbl_sz_flags |= addr_check;
1892 cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
1893 cmd->pbl_sz_flags |=
1894 (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
1895 cmd->pbl_sz_flags |=
1896 (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
1897 cmd->pbl_sz_flags |=
1898 (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
1899 cmd->pbl_sz_flags |=
1900 (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
1901 cmd->pbl_sz_flags |=
1902 (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);
1903
1904 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1905 if (status)
1906 goto mbx_err;
1907 rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
1908 hwmr->lkey = rsp->lrkey;
1909mbx_err:
1910 kfree(cmd);
1911 return status;
1912}
1913
1914int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
1915{
1916 int status = -ENOMEM;
1917 struct ocrdma_dealloc_lkey *cmd;
1918
1919 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
1920 if (!cmd)
1921 return -ENOMEM;
1922 cmd->lkey = lkey;
1923 cmd->rsvd_frmr = fr_mr ? 1 : 0;
1924 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1925 if (status)
1926 goto mbx_err;
1927mbx_err:
1928 kfree(cmd);
1929 return status;
1930}
1931
1932static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1933 u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
1934{
1935 int status = -ENOMEM;
1936 int i;
1937 struct ocrdma_reg_nsmr *cmd;
1938 struct ocrdma_reg_nsmr_rsp *rsp;
1939
1940 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
1941 if (!cmd)
1942 return -ENOMEM;
1943 cmd->num_pbl_pdid =
1944 pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05301945 cmd->fr_mr = hwmr->fr_mr;
Parav Panditfe2caef2012-03-21 04:09:06 +05301946
1947 cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
1948 OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
1949 cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
1950 OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
1951 cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
1952 OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
1953 cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
1954 OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
1955 cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
1956 OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
1957 cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);
1958
1959 cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
1960 cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
1961 OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
1962 cmd->totlen_low = hwmr->len;
1963 cmd->totlen_high = upper_32_bits(hwmr->len);
1964 cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
1965 cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
1966 cmd->va_loaddr = (u32) hwmr->va;
1967 cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
1968
1969 for (i = 0; i < pbl_cnt; i++) {
1970 cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
1971 cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
1972 }
1973 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1974 if (status)
1975 goto mbx_err;
1976 rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
1977 hwmr->lkey = rsp->lrkey;
1978mbx_err:
1979 kfree(cmd);
1980 return status;
1981}
1982
1983static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
1984 struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
1985 u32 pbl_offset, u32 last)
1986{
1987 int status = -ENOMEM;
1988 int i;
1989 struct ocrdma_reg_nsmr_cont *cmd;
1990
1991 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
1992 if (!cmd)
1993 return -ENOMEM;
1994 cmd->lrkey = hwmr->lkey;
1995 cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
1996 (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
1997 cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;
1998
1999 for (i = 0; i < pbl_cnt; i++) {
2000 cmd->pbl[i].lo =
2001 (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
2002 cmd->pbl[i].hi =
2003 upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
2004 }
2005 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2006 if (status)
2007 goto mbx_err;
2008mbx_err:
2009 kfree(cmd);
2010 return status;
2011}
2012
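/* Register a memory region. Firmware takes at most MAX_OCRDMA_NSMR_PBL
 * PBL addresses per command, so the registration is split into one
 * REGISTER_NSMR command plus as many REGISTER_NSMR_CONT commands as
 * needed, with the final chunk flagged as last.
 */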
2013int ocrdma_reg_mr(struct ocrdma_dev *dev,
2014 struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
2015{
2016 int status;
2017 u32 last = 0;
2018 u32 cur_pbl_cnt, pbl_offset;
2019 u32 pending_pbl_cnt = hwmr->num_pbls;
2020
2021 pbl_offset = 0;
2022 cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
2023 if (cur_pbl_cnt == pending_pbl_cnt)
2024 last = 1;
2025
2026 status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
2027 cur_pbl_cnt, hwmr->pbe_size, last);
2028 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002029 pr_err("%s() status=%d\n", __func__, status);
Parav Panditfe2caef2012-03-21 04:09:06 +05302030 return status;
2031 }
2032	/* if there are no more PBLs to register then exit. */
2033 if (last)
2034 return 0;
2035
2036 while (!last) {
2037 pbl_offset += cur_pbl_cnt;
2038 pending_pbl_cnt -= cur_pbl_cnt;
2039 cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
2040		/* if we reach the end of the PBLs, then we need to set the last
2041		 * bit, indicating no more PBLs to register for this memory key.
2042		 */
2043 if (cur_pbl_cnt == pending_pbl_cnt)
2044 last = 1;
2045
2046 status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
2047 pbl_offset, last);
2048 if (status)
2049 break;
2050 }
2051 if (status)
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002052 pr_err("%s() err. status=%d\n", __func__, status);
Parav Panditfe2caef2012-03-21 04:09:06 +05302053
2054 return status;
2055}
2056
2057bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
2058{
2059 struct ocrdma_qp *tmp;
2060 bool found = false;
2061 list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
2062 if (qp == tmp) {
2063 found = true;
2064 break;
2065 }
2066 }
2067 return found;
2068}
2069
2070bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
2071{
2072 struct ocrdma_qp *tmp;
2073 bool found = false;
2074 list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
2075 if (qp == tmp) {
2076 found = true;
2077 break;
2078 }
2079 }
2080 return found;
2081}
2082
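/* Add the QP to the flush lists of its send and receive CQs (the RQ
 * list is skipped when an SRQ is attached), under the device's
 * flush_q_lock.
 */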
2083void ocrdma_flush_qp(struct ocrdma_qp *qp)
2084{
2085 bool found;
2086 unsigned long flags;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302087 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302088
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302089 spin_lock_irqsave(&dev->flush_q_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05302090 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
2091 if (!found)
2092 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
2093 if (!qp->srq) {
2094 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
2095 if (!found)
2096 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
2097 }
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302098 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05302099}
2100
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05302101static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
2102{
2103 qp->sq.head = 0;
2104 qp->sq.tail = 0;
2105 qp->rq.head = 0;
2106 qp->rq.tail = 0;
2107}
2108
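/* Transition the software QP state under q_lock so it stays consistent
 * with WQE/RQE posting. Entering INIT resets the hardware queue
 * pointers and removes the QP from the flush lists; entering ERR
 * flushes the QP. Returns 1 if the QP is already in the target state.
 */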
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302109int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
2110 enum ib_qp_state *old_ib_state)
Parav Panditfe2caef2012-03-21 04:09:06 +05302111{
2112 unsigned long flags;
2113 int status = 0;
2114 enum ocrdma_qp_state new_state;
2115 new_state = get_ocrdma_qp_state(new_ib_state);
2116
2117 /* sync with wqe and rqe posting */
2118 spin_lock_irqsave(&qp->q_lock, flags);
2119
2120 if (old_ib_state)
2121 *old_ib_state = get_ibqp_state(qp->state);
2122 if (new_state == qp->state) {
2123 spin_unlock_irqrestore(&qp->q_lock, flags);
2124 return 1;
2125 }
2126
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302127
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05302128 if (new_state == OCRDMA_QPS_INIT) {
2129 ocrdma_init_hwq_ptr(qp);
2130 ocrdma_del_flush_qp(qp);
2131 } else if (new_state == OCRDMA_QPS_ERR) {
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302132 ocrdma_flush_qp(qp);
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05302133 }
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302134
2135 qp->state = new_state;
Parav Panditfe2caef2012-03-21 04:09:06 +05302136
2137 spin_unlock_irqrestore(&qp->q_lock, flags);
2138 return status;
2139}
2140
2141static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
2142{
2143 u32 flags = 0;
2144 if (qp->cap_flags & OCRDMA_QP_INB_RD)
2145 flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
2146 if (qp->cap_flags & OCRDMA_QP_INB_WR)
2147 flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
2148 if (qp->cap_flags & OCRDMA_QP_MW_BIND)
2149 flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
2150 if (qp->cap_flags & OCRDMA_QP_LKEY0)
2151 flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
2152 if (qp->cap_flags & OCRDMA_QP_FAST_REG)
2153 flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
2154 return flags;
2155}
2156
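/* Size and allocate the send queue for a new QP and encode its page
 * count, SGE limits and WQE limits into the CREATE_QP request.
 */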
2157static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
2158 struct ib_qp_init_attr *attrs,
2159 struct ocrdma_qp *qp)
2160{
2161 int status;
2162 u32 len, hw_pages, hw_page_size;
2163 dma_addr_t pa;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302164 struct ocrdma_pd *pd = qp->pd;
2165 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302166 struct pci_dev *pdev = dev->nic_info.pdev;
2167 u32 max_wqe_allocated;
2168 u32 max_sges = attrs->cap.max_send_sge;
2169
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302170 /* QP1 may exceed 127 */
Dan Carpenter6ebacdf2013-09-06 11:50:46 +03002171 max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302172 dev->attr.max_wqe);
Parav Panditfe2caef2012-03-21 04:09:06 +05302173
2174 status = ocrdma_build_q_conf(&max_wqe_allocated,
2175 dev->attr.wqe_size, &hw_pages, &hw_page_size);
2176 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002177 pr_err("%s() req. max_send_wr=0x%x\n", __func__,
2178 max_wqe_allocated);
Parav Panditfe2caef2012-03-21 04:09:06 +05302179 return -EINVAL;
2180 }
2181 qp->sq.max_cnt = max_wqe_allocated;
2182 len = (hw_pages * hw_page_size);
2183
2184 qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2185 if (!qp->sq.va)
2186 return -EINVAL;
2187 memset(qp->sq.va, 0, len);
2188 qp->sq.len = len;
2189 qp->sq.pa = pa;
2190 qp->sq.entry_size = dev->attr.wqe_size;
2191 ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);
2192
2193 cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2194 << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
2195 cmd->num_wq_rq_pages |= (hw_pages <<
2196 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
2197 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
2198 cmd->max_sge_send_write |= (max_sges <<
2199 OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
2200 OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
2201 cmd->max_sge_send_write |= (max_sges <<
2202 OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
2203 OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
2204 cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
2205 OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
2206 OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
2207 cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
2208 OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
2209 OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
2210 return 0;
2211}
2212
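/* Size and allocate the receive queue and encode its page count, SGE
 * limit and RQE limits into the CREATE_QP request.
 */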
2213static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
2214 struct ib_qp_init_attr *attrs,
2215 struct ocrdma_qp *qp)
2216{
2217 int status;
2218 u32 len, hw_pages, hw_page_size;
2219 dma_addr_t pa = 0;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302220 struct ocrdma_pd *pd = qp->pd;
2221 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302222 struct pci_dev *pdev = dev->nic_info.pdev;
2223 u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
2224
2225 status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
2226 &hw_pages, &hw_page_size);
2227 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002228 pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
2229 attrs->cap.max_recv_wr + 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05302230 return status;
2231 }
2232 qp->rq.max_cnt = max_rqe_allocated;
2233 len = (hw_pages * hw_page_size);
2234
2235 qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2236 if (!qp->rq.va)
Wei Yongjunc94e15c2013-06-23 09:07:19 +08002237 return -ENOMEM;
Parav Panditfe2caef2012-03-21 04:09:06 +05302238 memset(qp->rq.va, 0, len);
2239 qp->rq.pa = pa;
2240 qp->rq.len = len;
2241 qp->rq.entry_size = dev->attr.rqe_size;
2242
2243 ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2244 cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
2245 OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
2246 cmd->num_wq_rq_pages |=
2247 (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
2248 OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
2249 cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
2250 OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
2251 OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
2252 cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
2253 OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
2254 OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
2255 cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
2256 OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
2257 OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
2258 return 0;
2259}
2260
2261static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
2262 struct ocrdma_pd *pd,
2263 struct ocrdma_qp *qp,
2264 u8 enable_dpp_cq, u16 dpp_cq_id)
2265{
2266 pd->num_dpp_qp--;
2267 qp->dpp_enabled = true;
2268 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
2269 if (!enable_dpp_cq)
2270 return;
2271 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
2272 cmd->dpp_credits_cqid = dpp_cq_id;
2273 cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
2274 OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
2275}
2276
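/* Allocate the IRD queue used for incoming RDMA read responses and
 * pre-format each entry as an LKEY-type RQE header.
 */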
2277static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
2278 struct ocrdma_qp *qp)
2279{
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302280 struct ocrdma_pd *pd = qp->pd;
2281 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302282 struct pci_dev *pdev = dev->nic_info.pdev;
2283 dma_addr_t pa = 0;
2284 int ird_page_size = dev->attr.ird_page_size;
2285 int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302286 struct ocrdma_hdr_wqe *rqe;
2287 int i = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302288
2289 if (dev->attr.ird == 0)
2290 return 0;
2291
2292 qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
2293 &pa, GFP_KERNEL);
2294 if (!qp->ird_q_va)
2295 return -ENOMEM;
2296 memset(qp->ird_q_va, 0, ird_q_len);
2297 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
2298 pa, ird_page_size);
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302299 for (; i < ird_q_len / dev->attr.rqe_size; i++) {
2300 rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
2301 (i * dev->attr.rqe_size));
2302 rqe->cw = 0;
2303 rqe->cw |= 2;
2304 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2305 rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
2306 rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
2307 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302308 return 0;
2309}
2310
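/* Decode the CREATE_QP response: queue ids, negotiated ORD/IRD, DPP
 * offset and credits when granted, and the actually allocated WQE/RQE
 * counts (reported by firmware as log2 values).
 */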
2311static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
2312 struct ocrdma_qp *qp,
2313 struct ib_qp_init_attr *attrs,
2314 u16 *dpp_offset, u16 *dpp_credit_lmt)
2315{
2316 u32 max_wqe_allocated, max_rqe_allocated;
2317 qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
2318 qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
2319 qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
2320 qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
2321 qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
2322 qp->dpp_enabled = false;
2323 if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
2324 qp->dpp_enabled = true;
2325 *dpp_credit_lmt = (rsp->dpp_response &
2326 OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
2327 OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
2328 *dpp_offset = (rsp->dpp_response &
2329 OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
2330 OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
2331 }
2332 max_wqe_allocated =
2333 rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
2334 max_wqe_allocated = 1 << max_wqe_allocated;
2335 max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
2336
Parav Panditfe2caef2012-03-21 04:09:06 +05302337 qp->sq.max_cnt = max_wqe_allocated;
2338 qp->sq.max_wqe_idx = max_wqe_allocated - 1;
2339
2340 if (!attrs->srq) {
2341 qp->rq.max_cnt = max_rqe_allocated;
2342 qp->rq.max_wqe_idx = max_rqe_allocated - 1;
Parav Panditfe2caef2012-03-21 04:09:06 +05302343 }
2344}
2345
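/* Create a QP through the mailbox: build the SQ and either the RQ or
 * an SRQ attachment, set up the IRD queue, program the ORD/IRD limits
 * and CQ ids, and enable DPP when the PD supports it and the requested
 * inline data fits.
 */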
2346int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
2347 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
2348 u16 *dpp_credit_lmt)
2349{
2350 int status = -ENOMEM;
2351 u32 flags = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302352 struct ocrdma_pd *pd = qp->pd;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302353 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302354 struct pci_dev *pdev = dev->nic_info.pdev;
2355 struct ocrdma_cq *cq;
2356 struct ocrdma_create_qp_req *cmd;
2357 struct ocrdma_create_qp_rsp *rsp;
2358 int qptype;
2359
2360 switch (attrs->qp_type) {
2361 case IB_QPT_GSI:
2362 qptype = OCRDMA_QPT_GSI;
2363 break;
2364 case IB_QPT_RC:
2365 qptype = OCRDMA_QPT_RC;
2366 break;
2367 case IB_QPT_UD:
2368 qptype = OCRDMA_QPT_UD;
2369 break;
2370 default:
2371 return -EINVAL;
Joe Perches2b50176d2013-10-08 16:07:22 -07002372 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302373
2374 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
2375 if (!cmd)
2376 return status;
2377 cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
2378 OCRDMA_CREATE_QP_REQ_QPT_MASK;
2379 status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
2380 if (status)
2381 goto sq_err;
2382
2383 if (attrs->srq) {
2384 struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
2385 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
2386 cmd->rq_addr[0].lo = srq->id;
2387 qp->srq = srq;
2388 } else {
2389 status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
2390 if (status)
2391 goto rq_err;
2392 }
2393
2394 status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
2395 if (status)
2396 goto mbx_err;
2397
2398 cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
2399 OCRDMA_CREATE_QP_REQ_PD_ID_MASK;
2400
2401 flags = ocrdma_set_create_qp_mbx_access_flags(qp);
2402
2403 cmd->max_sge_recv_flags |= flags;
2404 cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
2405 OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
2406 OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
2407 cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
2408 OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
2409 OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
2410 cq = get_ocrdma_cq(attrs->send_cq);
2411 cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
2412 OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
2413 qp->sq_cq = cq;
2414 cq = get_ocrdma_cq(attrs->recv_cq);
2415 cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
2416 OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
2417 qp->rq_cq = cq;
2418
Devesh Sharmaf50f31e2014-06-10 19:32:12 +05302419 if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
2420 (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302421 ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
2422 dpp_cq_id);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302423 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302424
2425 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2426 if (status)
2427 goto mbx_err;
2428 rsp = (struct ocrdma_create_qp_rsp *)cmd;
2429 ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
2430 qp->state = OCRDMA_QPS_RST;
2431 kfree(cmd);
2432 return 0;
2433mbx_err:
2434 if (qp->rq.va)
2435 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2436rq_err:
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002437 pr_err("%s(%d) rq_err\n", __func__, dev->id);
Parav Panditfe2caef2012-03-21 04:09:06 +05302438 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2439sq_err:
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002440 pr_err("%s(%d) sq_err\n", __func__, dev->id);
Parav Panditfe2caef2012-03-21 04:09:06 +05302441 kfree(cmd);
2442 return status;
2443}
2444
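/* The QUERY_QP response is larger than the request, so the embedded
 * MQE is sized by the response structure.
 */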
2445int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2446 struct ocrdma_qp_params *param)
2447{
2448 int status = -ENOMEM;
2449 struct ocrdma_query_qp *cmd;
2450 struct ocrdma_query_qp_rsp *rsp;
2451
Mitesh Ahuja038ab8b2015-05-19 11:32:36 +05302452 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
Parav Panditfe2caef2012-03-21 04:09:06 +05302453 if (!cmd)
2454 return status;
2455 cmd->qp_id = qp->id;
2456 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2457 if (status)
2458 goto mbx_err;
2459 rsp = (struct ocrdma_query_qp_rsp *)cmd;
2460 memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
2461mbx_err:
2462 kfree(cmd);
2463 return status;
2464}
2465
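/* Program the address vector of the QP. A GRH is mandatory; the SGID,
 * source MAC and VLAN are resolved through the GID cache, and the DCBX
 * service level is applied to tagged traffic.
 */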
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302466static int ocrdma_set_av_params(struct ocrdma_qp *qp,
Parav Panditfe2caef2012-03-21 04:09:06 +05302467 struct ocrdma_modify_qp *cmd,
Selvin Xavierbf674722014-08-22 16:57:20 +05302468 struct ib_qp_attr *attrs,
2469 int attr_mask)
Parav Panditfe2caef2012-03-21 04:09:06 +05302470{
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302471 int status;
Parav Panditfe2caef2012-03-21 04:09:06 +05302472 struct ib_ah_attr *ah_attr = &attrs->ah_attr;
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302473 union ib_gid sgid, zgid;
Matan Barakdbf727d2015-10-15 18:38:51 +03002474 struct ib_gid_attr sgid_attr;
Devesh Sharma6f5deab2015-05-19 11:32:35 +05302475 u32 vlan_id = 0xFFFF;
Parav Panditfe2caef2012-03-21 04:09:06 +05302476 u8 mac_addr[6];
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302477 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302478
Parav Panditfe2caef2012-03-21 04:09:06 +05302479 if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302480 return -EINVAL;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302481 if (atomic_cmpxchg(&dev->update_sl, 1, 0))
2482 ocrdma_init_service_level(dev);
Parav Panditfe2caef2012-03-21 04:09:06 +05302483 cmd->params.tclass_sq_psn |=
2484 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
2485 cmd->params.rnt_rc_sl_fl |=
2486 (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05302487 cmd->params.rnt_rc_sl_fl |= (ah_attr->sl << OCRDMA_QP_PARAMS_SL_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05302488 cmd->params.hop_lmt_rq_psn |=
2489 (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
2490 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
2491 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
2492 sizeof(cmd->params.dgid));
Matan Barakdbf727d2015-10-15 18:38:51 +03002493
2494 status = ib_get_cached_gid(&dev->ibdev, 1, ah_attr->grh.sgid_index,
2495 &sgid, &sgid_attr);
2496 if (!status && sgid_attr.ndev) {
2497 vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
2498 memcpy(mac_addr, sgid_attr.ndev->dev_addr, ETH_ALEN);
2499 dev_put(sgid_attr.ndev);
2500 }
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302501
2502 memset(&zgid, 0, sizeof(zgid));
2503 if (!memcmp(&sgid, &zgid, sizeof(zgid)))
2504 return -EINVAL;
2505
Parav Panditfe2caef2012-03-21 04:09:06 +05302506 qp->sgid_idx = ah_attr->grh.sgid_index;
2507 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302508 status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
Devesh Sharmaa601dc72014-12-18 14:13:04 +05302509 if (status)
2510 return status;
Parav Panditfe2caef2012-03-21 04:09:06 +05302511 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2512 (mac_addr[2] << 16) | (mac_addr[3] << 24);
2513 /* convert them to LE format. */
2514 ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
2515 ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
2516 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
Devesh Sharma6f5deab2015-05-19 11:32:35 +05302517
2518 if (vlan_id < 0x1000) {
Matan Barakdbf727d2015-10-15 18:38:51 +03002519 if (dev->pfc_state) {
2520 vlan_id = 0;
2521 pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
2522 dev->id);
2523 pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
2524 dev->id);
2525 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302526 cmd->params.vlan_dmac_b4_to_b5 |=
2527 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2528 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
Selvin Xavier31dbdd92014-06-10 19:32:13 +05302529 cmd->params.rnt_rc_sl_fl |=
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302530 (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
Parav Panditfe2caef2012-03-21 04:09:06 +05302531 }
Devesh Sharma6f5deab2015-05-19 11:32:35 +05302532
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302533 return 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302534}
2535
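/* Translate the attr_mask/ib_qp_attr pair into mailbox QP parameters,
 * setting the matching OCRDMA_QP_PARA_*_VALID flag for every field
 * that is programmed.
 */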
2536static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2537 struct ocrdma_modify_qp *cmd,
Prarit Bhargavabc1b04a2014-02-19 15:05:16 -05002538 struct ib_qp_attr *attrs, int attr_mask)
Parav Panditfe2caef2012-03-21 04:09:06 +05302539{
2540 int status = 0;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302541 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302542
2543 if (attr_mask & IB_QP_PKEY_INDEX) {
2544 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
2545 OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
2546 cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
2547 }
2548 if (attr_mask & IB_QP_QKEY) {
2549 qp->qkey = attrs->qkey;
2550 cmd->params.qkey = attrs->qkey;
2551 cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
2552 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302553 if (attr_mask & IB_QP_AV) {
Selvin Xavierbf674722014-08-22 16:57:20 +05302554 status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302555 if (status)
2556 return status;
2557 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302558 /* set the default mac address for UD, GSI QPs */
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302559 cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
2560 (dev->nic_info.mac_addr[1] << 8) |
2561 (dev->nic_info.mac_addr[2] << 16) |
2562 (dev->nic_info.mac_addr[3] << 24);
2563 cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
2564 (dev->nic_info.mac_addr[5] << 8);
Parav Panditfe2caef2012-03-21 04:09:06 +05302565 }
2566 if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
2567 attrs->en_sqd_async_notify) {
2568 cmd->params.max_sge_recv_flags |=
2569 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
2570 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2571 }
2572 if (attr_mask & IB_QP_DEST_QPN) {
2573 cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
2574 OCRDMA_QP_PARAMS_DEST_QPN_MASK);
2575 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2576 }
2577 if (attr_mask & IB_QP_PATH_MTU) {
Naga Irrinki72d8a012015-05-19 11:32:39 +05302578 if (attrs->path_mtu < IB_MTU_512 ||
Naresh Gottumukkalad3cb6c02013-08-26 15:27:40 +05302579 attrs->path_mtu > IB_MTU_4096) {
Naga Irrinki72d8a012015-05-19 11:32:39 +05302580 pr_err("ocrdma%d: IB MTU %d is not supported\n",
2581 dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
Parav Panditfe2caef2012-03-21 04:09:06 +05302582 status = -EINVAL;
2583 goto pmtu_err;
2584 }
2585 cmd->params.path_mtu_pkey_indx |=
2586 (ib_mtu_enum_to_int(attrs->path_mtu) <<
2587 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
2588 OCRDMA_QP_PARAMS_PATH_MTU_MASK;
2589 cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
2590 }
2591 if (attr_mask & IB_QP_TIMEOUT) {
2592 cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
2593 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
2594 cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
2595 }
2596 if (attr_mask & IB_QP_RETRY_CNT) {
2597 cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
2598 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
2599 OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
2600 cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
2601 }
2602 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2603 cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
2604 OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
2605 OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
2606 cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
2607 }
2608 if (attr_mask & IB_QP_RNR_RETRY) {
2609 cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
2610 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
2611 & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
2612 cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
2613 }
2614 if (attr_mask & IB_QP_SQ_PSN) {
2615 cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
2616 cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
2617 }
2618 if (attr_mask & IB_QP_RQ_PSN) {
2619 cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
2620 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
2621 }
2622 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302623 if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302624 status = -EINVAL;
2625 goto pmtu_err;
2626 }
2627 qp->max_ord = attrs->max_rd_atomic;
2628 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
2629 }
2630 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302631 if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302632 status = -EINVAL;
2633 goto pmtu_err;
2634 }
2635 qp->max_ird = attrs->max_dest_rd_atomic;
2636 cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
2637 }
2638 cmd->params.max_ord_ird = (qp->max_ord <<
2639 OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
2640 (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
2641pmtu_err:
2642 return status;
2643}
2644
2645int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
Prarit Bhargavabc1b04a2014-02-19 15:05:16 -05002646 struct ib_qp_attr *attrs, int attr_mask)
Parav Panditfe2caef2012-03-21 04:09:06 +05302647{
2648 int status = -ENOMEM;
2649 struct ocrdma_modify_qp *cmd;
Parav Panditfe2caef2012-03-21 04:09:06 +05302650
2651 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
2652 if (!cmd)
2653 return status;
2654
2655 cmd->params.id = qp->id;
2656 cmd->flags = 0;
2657 if (attr_mask & IB_QP_STATE) {
2658 cmd->params.max_sge_recv_flags |=
2659 (get_ocrdma_qp_state(attrs->qp_state) <<
2660 OCRDMA_QP_PARAMS_STATE_SHIFT) &
2661 OCRDMA_QP_PARAMS_STATE_MASK;
2662 cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302663 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302664 cmd->params.max_sge_recv_flags |=
2665 (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
2666 OCRDMA_QP_PARAMS_STATE_MASK;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302667 }
2668
Prarit Bhargavabc1b04a2014-02-19 15:05:16 -05002669 status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
Parav Panditfe2caef2012-03-21 04:09:06 +05302670 if (status)
2671 goto mbx_err;
2672 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2673 if (status)
2674 goto mbx_err;
Roland Dreierc592c422012-04-17 01:18:28 -07002675
Parav Panditfe2caef2012-03-21 04:09:06 +05302676mbx_err:
2677 kfree(cmd);
2678 return status;
2679}
2680
2681int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
2682{
2683 int status = -ENOMEM;
2684 struct ocrdma_destroy_qp *cmd;
Parav Panditfe2caef2012-03-21 04:09:06 +05302685 struct pci_dev *pdev = dev->nic_info.pdev;
2686
2687 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
2688 if (!cmd)
2689 return status;
2690 cmd->qp_id = qp->id;
2691 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2692 if (status)
2693 goto mbx_err;
Roland Dreierc592c422012-04-17 01:18:28 -07002694
Parav Panditfe2caef2012-03-21 04:09:06 +05302695mbx_err:
2696 kfree(cmd);
2697 if (qp->sq.va)
2698 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2699 if (!qp->srq && qp->rq.va)
2700 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2701 if (qp->dpp_enabled)
2702 qp->pd->num_dpp_qp++;
2703 return status;
2704}
2705
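/* Create a shared receive queue: size the RQ via ocrdma_build_q_conf()
 * and read the actually allocated RQE and SGE limits back from the
 * response.
 */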
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302706int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
Parav Panditfe2caef2012-03-21 04:09:06 +05302707 struct ib_srq_init_attr *srq_attr,
2708 struct ocrdma_pd *pd)
2709{
2710 int status = -ENOMEM;
2711 int hw_pages, hw_page_size;
2712 int len;
2713 struct ocrdma_create_srq_rsp *rsp;
2714 struct ocrdma_create_srq *cmd;
2715 dma_addr_t pa;
Parav Panditfe2caef2012-03-21 04:09:06 +05302716 struct pci_dev *pdev = dev->nic_info.pdev;
2717 u32 max_rqe_allocated;
2718
2719 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
2720 if (!cmd)
2721 return status;
2722
2723 cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
2724 max_rqe_allocated = srq_attr->attr.max_wr + 1;
2725 status = ocrdma_build_q_conf(&max_rqe_allocated,
2726 dev->attr.rqe_size,
2727 &hw_pages, &hw_page_size);
2728 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002729 pr_err("%s() req. max_wr=0x%x\n", __func__,
2730 srq_attr->attr.max_wr);
Parav Panditfe2caef2012-03-21 04:09:06 +05302731 status = -EINVAL;
2732 goto ret;
2733 }
2734 len = hw_pages * hw_page_size;
2735 srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2736 if (!srq->rq.va) {
2737 status = -ENOMEM;
2738 goto ret;
2739 }
2740 ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2741
2742 srq->rq.entry_size = dev->attr.rqe_size;
2743 srq->rq.pa = pa;
2744 srq->rq.len = len;
2745 srq->rq.max_cnt = max_rqe_allocated;
2746
2747 cmd->max_sge_rqe = ilog2(max_rqe_allocated);
2748 cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
2749 OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;
2750
2751 cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2752 << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
2753 cmd->pages_rqe_sz |= (dev->attr.rqe_size
2754 << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
2755 & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
2756 cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;
2757
2758 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2759 if (status)
2760 goto mbx_err;
2761 rsp = (struct ocrdma_create_srq_rsp *)cmd;
2762 srq->id = rsp->id;
2763 srq->rq.dbid = rsp->id;
2764 max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
2765 OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
2766 OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
2767 max_rqe_allocated = (1 << max_rqe_allocated);
2768 srq->rq.max_cnt = max_rqe_allocated;
2769 srq->rq.max_wqe_idx = max_rqe_allocated - 1;
2770 srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
2771 OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
2772 OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
2773 goto ret;
2774mbx_err:
2775 dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
2776ret:
2777 kfree(cmd);
2778 return status;
2779}
2780
2781int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2782{
2783 int status = -ENOMEM;
2784 struct ocrdma_modify_srq *cmd;
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05302785 struct ocrdma_pd *pd = srq->pd;
2786 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302787
Naresh Gottumukkalad7e19c02013-08-26 15:27:51 +05302788 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
Parav Panditfe2caef2012-03-21 04:09:06 +05302789 if (!cmd)
2790 return status;
2791 cmd->id = srq->id;
2792 cmd->limit_max_rqe |= srq_attr->srq_limit <<
2793 OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302794 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302795 kfree(cmd);
2796 return status;
2797}
2798
2799int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2800{
2801 int status = -ENOMEM;
2802 struct ocrdma_query_srq *cmd;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302803 struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
2804
Naresh Gottumukkalad7e19c02013-08-26 15:27:51 +05302805 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
Parav Panditfe2caef2012-03-21 04:09:06 +05302806 if (!cmd)
2807 return status;
2808 cmd->id = srq->rq.dbid;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302809 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302810 if (status == 0) {
2811 struct ocrdma_query_srq_rsp *rsp =
2812 (struct ocrdma_query_srq_rsp *)cmd;
2813 srq_attr->max_sge =
2814 rsp->srq_lmt_max_sge &
2815 OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
2816 srq_attr->max_wr =
2817 rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
2818 srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
2819 OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
2820 }
2821 kfree(cmd);
2822 return status;
2823}
2824
2825int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
2826{
2827 int status = -ENOMEM;
2828 struct ocrdma_destroy_srq *cmd;
2829 struct pci_dev *pdev = dev->nic_info.pdev;
2830 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
2831 if (!cmd)
2832 return status;
2833 cmd->id = srq->id;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302834 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302835 if (srq->rq.va)
2836 dma_free_coherent(&pdev->dev, srq->rq.len,
2837 srq->rq.va, srq->rq.pa);
2838 kfree(cmd);
2839 return status;
2840}
2841
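/* Query the DCBX configuration. The response does not fit in an
 * embedded MQE, so a non-embedded command with a DMA-mapped payload
 * described by an SGE is used instead.
 */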
Selvin Xavier31dbdd92014-06-10 19:32:13 +05302842static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
2843 struct ocrdma_dcbx_cfg *dcbxcfg)
2844{
2845 int status = 0;
2846 dma_addr_t pa;
2847 struct ocrdma_mqe cmd;
2848
2849 struct ocrdma_get_dcbx_cfg_req *req = NULL;
2850 struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
2851 struct pci_dev *pdev = dev->nic_info.pdev;
2852 struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;
2853
2854 memset(&cmd, 0, sizeof(struct ocrdma_mqe));
2855	cmd.hdr.pyld_len = max_t(u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
2856 sizeof(struct ocrdma_get_dcbx_cfg_req));
2857 req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
2858 if (!req) {
2859 status = -ENOMEM;
2860 goto mem_err;
2861 }
2862
2863 cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
2864 OCRDMA_MQE_HDR_SGE_CNT_MASK;
2865 mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
2866 mqe_sge->pa_hi = (u32) upper_32_bits(pa);
2867 mqe_sge->len = cmd.hdr.pyld_len;
2868
2869 memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req));
2870 ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
2871 OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
2872 req->param_type = ptype;
2873
2874 status = ocrdma_mbx_cmd(dev, &cmd);
2875 if (status)
2876 goto mbx_err;
2877
2878 rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
2879 ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
2880 memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));
2881
2882mbx_err:
2883 dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
2884mem_err:
2885 return status;
2886}
2887
2888#define OCRDMA_MAX_SERVICE_LEVEL_INDEX 0x08
2889#define OCRDMA_DEFAULT_SERVICE_LEVEL 0x05
2890
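/* Walk the DCBX application parameter entries looking for the RoCE
 * protocol selected over L2, and pick the first service level whose
 * application priority also has PFC enabled.
 */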
2891static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
2892 struct ocrdma_dcbx_cfg *dcbxcfg,
2893 u8 *srvc_lvl)
2894{
2895 int status = -EINVAL, indx, slindx;
2896 int ventry_cnt;
2897 struct ocrdma_app_parameter *app_param;
2898 u8 valid, proto_sel;
2899 u8 app_prio, pfc_prio;
2900 u16 proto;
2901
2902 if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
2903 pr_info("%s ocrdma%d DCBX is disabled\n",
2904 dev_name(&dev->nic_info.pdev->dev), dev->id);
2905 goto out;
2906 }
2907
2908 if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
2909 pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
2910 dev_name(&dev->nic_info.pdev->dev), dev->id,
2911 (ptype > 0 ? "operational" : "admin"),
2912 (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
2913 "enabled" : "disabled",
2914 (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
2915 "" : ", not sync'ed");
2916 goto out;
2917 } else {
2918 pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
2919 dev_name(&dev->nic_info.pdev->dev), dev->id);
2920 }
2921
2922 ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
2923 OCRDMA_DCBX_APP_ENTRY_SHIFT)
2924 & OCRDMA_DCBX_STATE_MASK;
2925
2926 for (indx = 0; indx < ventry_cnt; indx++) {
2927 app_param = &dcbxcfg->app_param[indx];
2928 valid = (app_param->valid_proto_app >>
2929 OCRDMA_APP_PARAM_VALID_SHIFT)
2930 & OCRDMA_APP_PARAM_VALID_MASK;
2931 proto_sel = (app_param->valid_proto_app
2932 >> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
2933 & OCRDMA_APP_PARAM_PROTO_SEL_MASK;
2934 proto = app_param->valid_proto_app &
2935 OCRDMA_APP_PARAM_APP_PROTO_MASK;
2936
2937		if (valid && proto == OCRDMA_APP_PROTO_ROCE &&
2938		    proto_sel == OCRDMA_PROTO_SELECT_L2) {
2940 for (slindx = 0; slindx <
2941 OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
2942 app_prio = ocrdma_get_app_prio(
2943 (u8 *)app_param->app_prio,
2944 slindx);
2945 pfc_prio = ocrdma_get_pfc_prio(
2946 (u8 *)dcbxcfg->pfc_prio,
2947 slindx);
2948
2949 if (app_prio && pfc_prio) {
2950 *srvc_lvl = slindx;
2951 status = 0;
2952 goto out;
2953 }
2954 }
2955 if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
2956 pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
2957 dev_name(&dev->nic_info.pdev->dev),
2958 dev->id, proto);
2959 }
2960 }
2961 }
2962
2963out:
2964 return status;
2965}
2966
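/* Derive the RoCE service level from DCBX: try the operational
 * parameters first, fall back to the admin parameters, and finally use
 * OCRDMA_DEFAULT_SERVICE_LEVEL if neither yields a usable mapping.
 */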
2967void ocrdma_init_service_level(struct ocrdma_dev *dev)
2968{
2969 int status = 0, indx;
2970 struct ocrdma_dcbx_cfg dcbxcfg;
2971 u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
2972 int ptype = OCRDMA_PARAMETER_TYPE_OPER;
2973
2974 for (indx = 0; indx < 2; indx++) {
2975 status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
2976 if (status) {
2977 pr_err("%s(): status=%d\n", __func__, status);
2978 ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
2979 continue;
2980 }
2981
2982 status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
2983 &dcbxcfg, &srvc_lvl);
2984 if (status) {
2985 ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
2986 continue;
2987 }
2988
2989 break;
2990 }
2991
2992 if (status)
2993 pr_info("%s ocrdma%d service level default\n",
2994 dev_name(&dev->nic_info.pdev->dev), dev->id);
2995 else
2996 pr_info("%s ocrdma%d service level %d\n",
2997 dev_name(&dev->nic_info.pdev->dev), dev->id,
2998 srvc_lvl);
2999
3000 dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
3001 dev->sl = srvc_lvl;
3002}
3003
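/* Claim the first free entry in the shared AV table under av_tbl.lock;
 * returns -EAGAIN when every entry is in use.
 */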
Parav Panditfe2caef2012-03-21 04:09:06 +05303004int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
3005{
3006 int i;
3007 int status = -EINVAL;
3008 struct ocrdma_av *av;
3009 unsigned long flags;
3010
3011 av = dev->av_tbl.va;
3012 spin_lock_irqsave(&dev->av_tbl.lock, flags);
3013 for (i = 0; i < dev->av_tbl.num_ah; i++) {
3014 if (av->valid == 0) {
3015 av->valid = OCRDMA_AV_VALID;
3016 ah->av = av;
3017 ah->id = i;
3018 status = 0;
3019 break;
3020 }
3021 av++;
3022 }
3023 if (i == dev->av_tbl.num_ah)
3024 status = -EAGAIN;
3025 spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
3026 return status;
3027}
3028
int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	ah->av->valid = 0;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
	return 0;
}

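/* Create one EQ per available MSI-X vector (capped at the number of
 * online CPUs), or a single shared-IRQ EQ in INTx mode, and register an
 * interrupt handler for each.
 */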
static int ocrdma_create_eqs(struct ocrdma_dev *dev)
{
	int num_eq, i, status = 0;
	int irq;
	unsigned long flags = 0;

	num_eq = dev->nic_info.msix.num_vectors -
			dev->nic_info.msix.start_vector;
	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
		num_eq = 1;
		flags = IRQF_SHARED;
	} else {
		num_eq = min_t(u32, num_eq, num_online_cpus());
	}

	if (!num_eq)
		return -EINVAL;

	dev->eq_tbl = kcalloc(num_eq, sizeof(struct ocrdma_eq), GFP_KERNEL);
	if (!dev->eq_tbl)
		return -ENOMEM;

	for (i = 0; i < num_eq; i++) {
		status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
					  OCRDMA_EQ_LEN);
		if (status) {
			status = -EINVAL;
			break;
		}
		sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
			dev->id, i);
		irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
		status = request_irq(irq, ocrdma_irq_handler, flags,
				     dev->eq_tbl[i].irq_name,
				     &dev->eq_tbl[i]);
		if (status)
			goto done;
		dev->eq_cnt += 1;
	}
	/* one eq is sufficient for data path to work */
	return 0;
done:
	ocrdma_destroy_eqs(dev);
	return status;
}

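/* Program new EQ interrupt delays through a MODIFY_EQ_DELAY mailbox
 * command. Each EQ's delay multiplier is derived as 65% of its stored
 * delay value, e.g. prev_eqd == 256 yields (256 * 65) / 100 == 166.
 */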
static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
				 int num)
{
	int i, status = -ENOMEM;
	struct ocrdma_modify_eqd_req *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
	if (!cmd)
		return status;

	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->cmd.num_eq = num;
	for (i = 0; i < num; i++) {
		cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
		cmd->cmd.set_eqd[i].phase = 0;
		cmd->cmd.set_eqd[i].delay_multiplier =
				(eq[i].aic_obj.prev_eqd * 65) / 100;
	}
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}

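/* Apply an EQ-delay update, splitting requests larger than 8 EQs into
 * chunks of at most 8 per mailbox command.
 */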
static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
			     int num)
{
	int num_eqs, i = 0;

	if (num > 8) {
		while (num) {
			num_eqs = min(num, 8);
			ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
			i += num_eqs;
			num -= num_eqs;
		}
	} else {
		ocrdma_mbx_modify_eqd(dev, eq, num);
	}
	return 0;
}

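/* Adaptive interrupt coalescing, re-armed every second: an EQ whose
 * interrupt rate crossed the high watermark moves to the maximum delay,
 * one that dropped below the low watermark moves back to the minimum,
 * and a mailbox update is issued only when something changed.
 */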
void ocrdma_eqd_set_task(struct work_struct *work)
{
	struct ocrdma_dev *dev =
		container_of(work, struct ocrdma_dev, eqd_work.work);
	struct ocrdma_eq *eq = NULL;
	int i, num = 0;
	u64 eq_intr;

	for (i = 0; i < dev->eq_cnt; i++) {
		eq = &dev->eq_tbl[i];
		if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
			eq_intr = eq->aic_obj.eq_intr_cnt -
				  eq->aic_obj.prev_eq_intr_cnt;
			/* Hop between the two extreme delay values only;
			 * no intermediate steps are used.
			 */
			if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
			    (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
				eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
				num++;
			} else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
				   (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
				eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
				num++;
			}
		}
		eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
	}

	if (num)
		ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
}

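/* Bring up the control path in dependency order: EQs, then the mailbox
 * queue, then the firmware/device queries and the AH table. Each error
 * label unwinds exactly the steps that had already succeeded.
 */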
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
	int status;

	/* create the eqs */
	status = ocrdma_create_eqs(dev);
	if (status)
		goto qpeq_err;
	status = ocrdma_create_mq(dev);
	if (status)
		goto mq_err;
	status = ocrdma_mbx_query_fw_config(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_dev(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_fw_ver(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_create_ah_tbl(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_get_phy_info(dev);
	if (status)
		goto info_attrb_err;
	status = ocrdma_mbx_get_ctrl_attribs(dev);
	if (status)
		goto info_attrb_err;

	return 0;

info_attrb_err:
	ocrdma_mbx_delete_ah_tbl(dev);
conf_err:
	ocrdma_destroy_mq(dev);
mq_err:
	ocrdma_destroy_eqs(dev);
qpeq_err:
	pr_err("%s() status=%d\n", __func__, status);
	return status;
}

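/* Tear down the control path: free the PD pool and AH table, then the
 * mailbox queue, and finally the EQs.
 */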
void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
	ocrdma_free_pd_pool(dev);
	ocrdma_mbx_delete_ah_tbl(dev);

	/* cleanup the control path */
	ocrdma_destroy_mq(dev);

	/* cleanup the eqs */
	ocrdma_destroy_eqs(dev);
}