/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) CNA Adapters.
 * Copyright (C) 2008-2012 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General
 * Public License as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD
 * TO BE LEGALLY INVALID. See the GNU General Public License for
 * more details, a copy of which can be found in the file COPYING
 * included with this package.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"

enum mbx_status {
	OCRDMA_MBX_STATUS_FAILED = 1,
	OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3,
	OCRDMA_MBX_STATUS_OOR = 100,
	OCRDMA_MBX_STATUS_INVALID_PD = 101,
	OCRDMA_MBX_STATUS_PD_INUSE = 102,
	OCRDMA_MBX_STATUS_INVALID_CQ = 103,
	OCRDMA_MBX_STATUS_INVALID_QP = 104,
	OCRDMA_MBX_STATUS_INVALID_LKEY = 105,
	OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106,
	OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107,
	OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,
	OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,
	OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110,
	OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,
	OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112,
	OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113,
	OCRDMA_MBX_STATUS_MW_BOUND = 114,
	OCRDMA_MBX_STATUS_INVALID_VA = 115,
	OCRDMA_MBX_STATUS_INVALID_LENGTH = 116,
	OCRDMA_MBX_STATUS_INVALID_FBO = 117,
	OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118,
	OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119,
	OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,
	OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,
	OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129,
	OCRDMA_MBX_STATUS_SRQ_ERROR = 133,
	OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134,
	OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135,
	OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136,
	OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,
	OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,
	OCRDMA_MBX_STATUS_QP_BOUND = 130,
	OCRDMA_MBX_STATUS_INVALID_CHANGE = 139,
	OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140,
	OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,
	OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142,
	OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143,
	OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144
};

enum additional_status {
	OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
};

enum cqe_status {
	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1,
	OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2,
	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3,
	OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4,
	OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5
};

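/* Ring helpers: the EQ, mailbox CQ (MCQ) and mailbox queue (MQ) are
 * simple circular buffers. The head/tail indices wrap with an
 * "& (len - 1)" mask, which assumes the queue lengths are powers of two.
 */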
static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
{
	return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
}

static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
{
	eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
}

static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
{
	struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
		(dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));

	if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
		return NULL;
	return cqe;
}

static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
{
	dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
}

static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
{
	return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
}

static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
{
	dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
}

static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
{
	return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
}

enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
{
	switch (qps) {
	case OCRDMA_QPS_RST:
		return IB_QPS_RESET;
	case OCRDMA_QPS_INIT:
		return IB_QPS_INIT;
	case OCRDMA_QPS_RTR:
		return IB_QPS_RTR;
	case OCRDMA_QPS_RTS:
		return IB_QPS_RTS;
	case OCRDMA_QPS_SQD:
	case OCRDMA_QPS_SQ_DRAINING:
		return IB_QPS_SQD;
	case OCRDMA_QPS_SQE:
		return IB_QPS_SQE;
	case OCRDMA_QPS_ERR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}

static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
{
	switch (qps) {
	case IB_QPS_RESET:
		return OCRDMA_QPS_RST;
	case IB_QPS_INIT:
		return OCRDMA_QPS_INIT;
	case IB_QPS_RTR:
		return OCRDMA_QPS_RTR;
	case IB_QPS_RTS:
		return OCRDMA_QPS_RTS;
	case IB_QPS_SQD:
		return OCRDMA_QPS_SQD;
	case IB_QPS_SQE:
		return OCRDMA_QPS_SQE;
	case IB_QPS_ERR:
		return OCRDMA_QPS_ERR;
	}
	return OCRDMA_QPS_ERR;
}

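/* Translate the status/additional-status fields of a mailbox (MQE)
 * response into a Linux errno so that callers can propagate ordinary
 * error codes instead of raw firmware status values.
 */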
static int ocrdma_get_mbx_errno(u32 status)
{
	int err_num;
	u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
				OCRDMA_MBX_RSP_STATUS_SHIFT;
	u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
				OCRDMA_MBX_RSP_ASTATUS_SHIFT;

	switch (mbox_status) {
	case OCRDMA_MBX_STATUS_OOR:
	case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
		err_num = -EAGAIN;
		break;

	case OCRDMA_MBX_STATUS_INVALID_PD:
	case OCRDMA_MBX_STATUS_INVALID_CQ:
	case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
	case OCRDMA_MBX_STATUS_INVALID_QP:
	case OCRDMA_MBX_STATUS_INVALID_CHANGE:
	case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
	case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
	case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
	case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
	case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
	case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
	case OCRDMA_MBX_STATUS_INVALID_LKEY:
	case OCRDMA_MBX_STATUS_INVALID_VA:
	case OCRDMA_MBX_STATUS_INVALID_LENGTH:
	case OCRDMA_MBX_STATUS_INVALID_FBO:
	case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
	case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
	case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
	case OCRDMA_MBX_STATUS_SRQ_ERROR:
	case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
		err_num = -EINVAL;
		break;

	case OCRDMA_MBX_STATUS_PD_INUSE:
	case OCRDMA_MBX_STATUS_QP_BOUND:
	case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
	case OCRDMA_MBX_STATUS_MW_BOUND:
		err_num = -EBUSY;
		break;

	case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
	case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
	case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
	case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
	case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
		err_num = -ENOBUFS;
		break;

	case OCRDMA_MBX_STATUS_FAILED:
		switch (add_status) {
		case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
			err_num = -EAGAIN;
			break;
		}
	default:
		err_num = -EFAULT;
	}
	return err_num;
}

char *port_speed_string(struct ocrdma_dev *dev)
{
	char *str = "";
	u16 speeds_supported;

	speeds_supported = dev->phy.fixed_speeds_supported |
				dev->phy.auto_speeds_supported;
	if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
		str = "40Gbps ";
	else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
		str = "10Gbps ";
	else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
		str = "1Gbps ";

	return str;
}

static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
{
	int err_num = -EINVAL;

	switch (cqe_status) {
	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
		err_num = -EPERM;
		break;
	case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
		err_num = -EINVAL;
		break;
	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
	case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
		err_num = -EINVAL;
		break;
	case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
	default:
		err_num = -EINVAL;
		break;
	}
	return err_num;
}

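/* Doorbell helpers. Each queue type has its own doorbell format in the
 * BAR mapped at dev->nic_info.db: the CQ doorbell carries the CQ id,
 * re-arm/solicit flags and the number of CQEs consumed; the MQ doorbell
 * posts one MQE; the EQ doorbell acks EQEs and optionally re-arms the EQ.
 */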
void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
		       bool solicited, u16 cqe_popped)
{
	u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;

	val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
		OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (armed)
		val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
	if (solicited)
		val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
	val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
}

static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
{
	u32 val = 0;

	val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
	val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
}

static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
			      bool arm, bool clear_int, u16 num_eqe)
{
	u32 val = 0;

	val |= eq_id & OCRDMA_EQ_ID_MASK;
	val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);
	if (arm)
		val |= (1 << OCRDMA_REARM_SHIFT);
	if (clear_int)
		val |= (1 << OCRDMA_EQ_CLR_SHIFT);
	val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
	val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
}

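/* Mailbox command construction: every request starts with a common
 * header (opcode, subsystem, timeout, payload length), optionally
 * wrapped in an MQE when the command is sent embedded on the MQ.
 */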
static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
			    u8 opcode, u8 subsys, u32 cmd_len)
{
	cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
	cmd_hdr->timeout = 20; /* seconds */
	cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
}

static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
{
	struct ocrdma_mqe *mqe;

	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
	if (!mqe)
		return NULL;
	mqe->hdr.spcl_sge_cnt_emb |=
		(OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
		OCRDMA_MQE_HDR_EMB_MASK;
	mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);

	ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
			mqe->hdr.pyld_len);
	return mqe;
}

static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
{
	dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
}

static int ocrdma_alloc_q(struct ocrdma_dev *dev,
			  struct ocrdma_queue_info *q, u16 len, u16 entry_size)
{
	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	q->size = len * entry_size;
	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
				   &q->dma, GFP_KERNEL);
	if (!q->va)
		return -ENOMEM;
	memset(q->va, 0, q->size);
	return 0;
}

static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
				 dma_addr_t host_pa, int hw_page_size)
{
	int i;

	for (i = 0; i < cnt; i++) {
		q_pa[i].lo = (u32) (host_pa & 0xffffffff);
		q_pa[i].hi = (u32) upper_32_bits(host_pa);
		host_pa += hw_page_size;
	}
}

static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
			       struct ocrdma_queue_info *q, int queue_type)
{
	u8 opcode = 0;
	int status;
	struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;

	switch (queue_type) {
	case QTYPE_MCCQ:
		opcode = OCRDMA_CMD_DELETE_MQ;
		break;
	case QTYPE_CQ:
		opcode = OCRDMA_CMD_DELETE_CQ;
		break;
	case QTYPE_EQ:
		opcode = OCRDMA_CMD_DELETE_EQ;
		break;
	default:
		BUG();
	}
	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cmd->id = q->id;

	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status)
		q->created = false;
	return status;
}

static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int status;
	struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
	struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
			sizeof(*cmd));

	cmd->req.rsvd_version = 2;
	cmd->num_pages = 4;
	cmd->valid = OCRDMA_CREATE_EQ_VALID;
	cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;

	ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
			     PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
				 NULL);
	if (!status) {
		eq->q.id = rsp->vector_eqid & 0xffff;
		eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
		eq->q.created = true;
	}
	return status;
}

static int ocrdma_create_eq(struct ocrdma_dev *dev,
			    struct ocrdma_eq *eq, u16 q_len)
{
	int status;

	status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
				sizeof(struct ocrdma_eqe));
	if (status)
		return status;

	status = ocrdma_mbx_create_eq(dev, eq);
	if (status)
		goto mbx_err;
	eq->dev = dev;
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);

	return 0;
mbx_err:
	ocrdma_free_q(dev, &eq->q);
	return status;
}

int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int irq;

	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
		irq = dev->nic_info.pdev->irq;
	else
		irq = dev->nic_info.msix.vector_list[eq->vector];
	return irq;
}

static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	if (eq->q.created) {
		ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
		ocrdma_free_q(dev, &eq->q);
	}
}

static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
	int irq;

	/* disarm EQ so that interrupts are not generated during
	 * freeing and while EQ delete is in progress.
	 */
	ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);

	irq = ocrdma_get_irq(dev, eq);
	free_irq(irq, eq);
	_ocrdma_destroy_eq(dev, eq);
}

static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
{
	int i;

	for (i = 0; i < dev->eq_cnt; i++)
		ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
}

static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
				   struct ocrdma_queue_info *cq,
				   struct ocrdma_queue_info *eq)
{
	struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
	struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
	int status;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
	cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
		OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
	cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);

	cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
	cmd->eqn = eq->id;
	cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);

	ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
			     cq->dma, PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status) {
		cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
		cq->created = true;
	}
	return status;
}

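/* Ring sizes are passed to the controller encoded as log2(entries) + 1
 * (computed here with fls()); an encoding of 16 is represented as 0.
 */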
static u32 ocrdma_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
				struct ocrdma_queue_info *mq,
				struct ocrdma_queue_info *cq)
{
	int num_pages, status;
	struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
	struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
	struct ocrdma_pa *pa;

	memset(cmd, 0, sizeof(*cmd));
	num_pages = PAGES_4K_SPANNED(mq->va, mq->size);

	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cmd->req.rsvd_version = 1;
	cmd->cqid_pages = num_pages;
	cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
	cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;

	cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
	cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);

	cmd->async_cqid_ringsize = cq->id;
	cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
				OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
	cmd->valid = OCRDMA_CREATE_MQ_VALID;
	pa = &cmd->pa[0];

	ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	if (!status) {
		mq->id = rsp->id;
		mq->created = true;
	}
	return status;
}

static int ocrdma_create_mq(struct ocrdma_dev *dev)
{
	int status;

	/* Alloc completion queue for Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
				sizeof(struct ocrdma_mcqe));
	if (status)
		goto alloc_err;

	dev->eq_tbl[0].cq_cnt++;
	status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
	if (status)
		goto mbx_cq_free;

	memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
	init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
	mutex_init(&dev->mqe_ctx.lock);

	/* Alloc Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
				sizeof(struct ocrdma_mqe));
	if (status)
		goto mbx_cq_destroy;
	status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
	if (status)
		goto mbx_q_free;
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
	return 0;

mbx_q_free:
	ocrdma_free_q(dev, &dev->mq.sq);
mbx_cq_destroy:
	ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
mbx_cq_free:
	ocrdma_free_q(dev, &dev->mq.cq);
alloc_err:
	return status;
}

static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
{
	struct ocrdma_queue_info *mbxq, *cq;

	/* mqe_ctx lock synchronizes with any other pending cmds. */
	mutex_lock(&dev->mqe_ctx.lock);
	mbxq = &dev->mq.sq;
	if (mbxq->created) {
		ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
		ocrdma_free_q(dev, mbxq);
	}
	mutex_unlock(&dev->mqe_ctx.lock);

	cq = &dev->mq.cq;
	if (cq->created) {
		ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
		ocrdma_free_q(dev, cq);
	}
}

static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
				       struct ocrdma_qp *qp)
{
	enum ib_qp_state new_ib_qps = IB_QPS_ERR;
	enum ib_qp_state old_ib_qps;

	if (qp == NULL)
		BUG();
	ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
}

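/* Turn an asynchronous MCQE reported by the firmware into the matching
 * ib_event and deliver it to the QP, CQ, SRQ or device event handler
 * registered by the consumer.
 */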
static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
				    struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_cq *cq = NULL;
	struct ib_event ib_evt;
	int cq_event = 0;
	int qp_event = 1;
	int srq_event = 0;
	int dev_event = 0;
	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
			OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

	if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
		qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
	if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
		cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];

	memset(&ib_evt, 0, sizeof(ib_evt));

	ib_evt.device = &dev->ibdev;

	switch (type) {
	case OCRDMA_CQ_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		cq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_CQ_OVERRUN_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;
		cq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_CQ_QPCAT_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_FATAL;
		ocrdma_process_qpcat_error(dev, qp);
		break;
	case OCRDMA_QP_ACCESS_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case OCRDMA_QP_COMM_EST_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_COMM_EST;
		break;
	case OCRDMA_SQ_DRAINED_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_SQ_DRAINED;
		break;
	case OCRDMA_DEVICE_FATAL_EVENT:
		ib_evt.element.port_num = 1;
		ib_evt.event = IB_EVENT_DEVICE_FATAL;
		qp_event = 0;
		dev_event = 1;
		break;
	case OCRDMA_SRQCAT_ERROR:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_ERR;
		srq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_SRQ_LIMIT_EVENT:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
		srq_event = 1;
		qp_event = 0;
		break;
	case OCRDMA_QP_LAST_WQE_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
		break;
	default:
		cq_event = 0;
		qp_event = 0;
		srq_event = 0;
		dev_event = 0;
		pr_err("%s() unknown type=0x%x\n", __func__, type);
		break;
	}

	if (type < OCRDMA_MAX_ASYNC_ERRORS)
		atomic_inc(&dev->async_err_stats[type]);

	if (qp_event) {
		if (qp->ibqp.event_handler)
			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
	} else if (cq_event) {
		if (cq->ibcq.event_handler)
			cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
	} else if (srq_event) {
		if (qp->srq->ibsrq.event_handler)
			qp->srq->ibsrq.event_handler(&ib_evt,
						     qp->srq->ibsrq.srq_context);
	} else if (dev_event) {
		pr_err("%s: Fatal event received\n", dev->ibdev.name);
		ib_dispatch_event(&ib_evt);
	}

}

static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
				     struct ocrdma_ae_mcqe *cqe)
{
	struct ocrdma_ae_pvid_mcqe *evt;
	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
			OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

	switch (type) {
	case OCRDMA_ASYNC_EVENT_PVID_STATE:
		evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
		if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
			OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
			dev->pvid = ((evt->tag_enabled &
					OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
					OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
		break;

	case OCRDMA_ASYNC_EVENT_COS_VALUE:
		atomic_set(&dev->update_sl, 1);
		break;
	default:
		/* Events we are not interested in. */
		break;
	}
}

static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
	/* async CQE processing */
	struct ocrdma_ae_mcqe *cqe = ae_cqe;
	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
			OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;

	if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
		ocrdma_dispatch_ibevent(dev, cqe);
	else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
		ocrdma_process_grp5_aync(dev, cqe);
	else
		pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
		       dev->id, evt_code);
}

static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
{
	if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
		dev->mqe_ctx.cqe_status = (cqe->status &
			OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
		dev->mqe_ctx.ext_status =
			(cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
			>> OCRDMA_MCQE_ESTATUS_SHIFT;
		dev->mqe_ctx.cmd_done = true;
		wake_up(&dev->mqe_ctx.cmd_wait);
	} else
		pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
		       __func__, cqe->tag_lo, dev->mqe_ctx.tag);
}

static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	u16 cqe_popped = 0;
	struct ocrdma_mcqe *cqe;

	while (1) {
		cqe = ocrdma_get_mcqe(dev);
		if (cqe == NULL)
			break;
		ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
		cqe_popped += 1;
		if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
			ocrdma_process_acqe(dev, cqe);
		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
			ocrdma_process_mcqe(dev, cqe);
		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
		ocrdma_mcq_inc_tail(dev);
	}
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
	return 0;
}

static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
				struct ocrdma_cq *cq, bool sq)
{
	struct ocrdma_qp *qp;
	struct list_head *cur;
	struct ocrdma_cq *bcq = NULL;
	struct list_head *head = sq ? (&cq->sq_head) : (&cq->rq_head);

	list_for_each(cur, head) {
		if (sq)
			qp = list_entry(cur, struct ocrdma_qp, sq_entry);
		else
			qp = list_entry(cur, struct ocrdma_qp, rq_entry);

		if (qp->srq)
			continue;
		/* if wq and rq share the same cq, then comp_handler
		 * is already invoked.
		 */
		if (qp->sq_cq == qp->rq_cq)
			continue;
		/* if completion came on sq, rq's cq is buddy cq.
		 * if completion came on rq, sq's cq is buddy cq.
		 */
		if (qp->sq_cq == cq)
			bcq = qp->rq_cq;
		else
			bcq = qp->sq_cq;
		return bcq;
	}
	return NULL;
}

static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
				       struct ocrdma_cq *cq)
{
	unsigned long flags;
	struct ocrdma_cq *bcq = NULL;

	/* Go through the list of QPs in error state which are using this CQ
	 * and invoke its callback handler to trigger CQE processing for
	 * error/flushed CQEs. It is rare to find more than a few entries in
	 * this list as most consumers stop after getting an error CQE.
	 * The list is traversed only once, until a matching buddy cq is
	 * found for a QP.
	 */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	/* Check if buddy CQ is present.
	 * true - Check for SQ CQ
	 * false - Check for RQ CQ
	 */
	bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
	if (bcq == NULL)
		bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);

	/* if there is a valid buddy cq, look for its completion handler */
	if (bcq && bcq->ibcq.comp_handler) {
		spin_lock_irqsave(&bcq->comp_handler_lock, flags);
		(*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
		spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
	}
}

static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
{
	unsigned long flags;
	struct ocrdma_cq *cq;

	if (cq_idx >= OCRDMA_MAX_CQ)
		BUG();

	cq = dev->cq_tbl[cq_idx];
	if (cq == NULL)
		return;

	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
	}
	ocrdma_qp_buddy_cq_handler(dev, cq);
}

static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
{
	/* process the MQ-CQE. */
	if (cq_id == dev->mq.cq.id)
		ocrdma_mq_cq_handler(dev, cq_id);
	else
		ocrdma_qp_cq_handler(dev, cq_id);
}

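/* Interrupt handler for an EQ: drain valid EQEs, ack each one through
 * the EQ doorbell, dispatch the CQ it points at, and finally re-arm the
 * EQ. The budget (number of CQs bound to this EQ) bounds the loop and
 * guards against a stale EQE left behind after the last bound CQ was
 * destroyed.
 */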
static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
{
	struct ocrdma_eq *eq = handle;
	struct ocrdma_dev *dev = eq->dev;
	struct ocrdma_eqe eqe;
	struct ocrdma_eqe *ptr;
	u16 cq_id;
	u8 mcode;
	int budget = eq->cq_cnt;

	do {
		ptr = ocrdma_get_eqe(eq);
		eqe = *ptr;
		ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
		mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
				>> OCRDMA_EQE_MAJOR_CODE_SHIFT;
		if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
			pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
			       eq->q.id, eqe.id_valid);
		if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
			break;

		ptr->id_valid = 0;
		/* ring eq doorbell as soon as it is consumed. */
		ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
		/* check whether it is a CQE or not. */
		if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
			cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
			ocrdma_cq_handler(dev, cq_id);
		}
		ocrdma_eq_inc_tail(eq);

		/* There can be a stale EQE after the last bound CQ is
		 * destroyed. EQE valid and budget == 0 implies this.
		 */
		if (budget)
			budget--;

	} while (budget);

	eq->aic_obj.eq_intr_cnt++;
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
	return IRQ_HANDLED;
}

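/* Mailbox submission path: copy the MQE into the MQ slot at the current
 * head, ring the MQ doorbell, and sleep until ocrdma_process_mcqe()
 * sees the matching completion tag and wakes the waiter (30 second
 * timeout).
 */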
static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
{
	struct ocrdma_mqe *mqe;

	dev->mqe_ctx.tag = dev->mq.sq.head;
	dev->mqe_ctx.cmd_done = false;
	mqe = ocrdma_get_mqe(dev);
	cmd->hdr.tag_lo = dev->mq.sq.head;
	ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
	/* make sure descriptor is written before ringing doorbell */
	wmb();
	ocrdma_mq_inc_head(dev);
	ocrdma_ring_mq_db(dev);
}

static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
{
	long status;
	/* 30 sec timeout */
	status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
				    (dev->mqe_ctx.cmd_done != false),
				    msecs_to_jiffies(30000));
	if (status)
		return 0;
	else {
		dev->mqe_ctx.fw_error_state = true;
		pr_err("%s(%d) mailbox timeout: fw not responding\n",
		       __func__, dev->id);
		return -1;
	}
}

/* issue a mailbox command on the MQ */
static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
{
	int status = 0;
	u16 cqe_status, ext_status;
	struct ocrdma_mqe *rsp_mqe;
	struct ocrdma_mbx_rsp *rsp = NULL;

	mutex_lock(&dev->mqe_ctx.lock);
	if (dev->mqe_ctx.fw_error_state)
		goto mbx_err;
	ocrdma_post_mqe(dev, mqe);
	status = ocrdma_wait_mqe_cmpl(dev);
	if (status)
		goto mbx_err;
	cqe_status = dev->mqe_ctx.cqe_status;
	ext_status = dev->mqe_ctx.ext_status;
	rsp_mqe = ocrdma_get_mqe_rsp(dev);
	ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
	if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
				OCRDMA_MQE_HDR_EMB_SHIFT)
		rsp = &mqe->u.rsp;

	if (cqe_status || ext_status) {
		pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
		       __func__, cqe_status, ext_status);
		if (rsp) {
			/* This is for embedded cmds. */
			pr_err("opcode=0x%x, subsystem=0x%x\n",
			       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
				OCRDMA_MBX_RSP_OPCODE_SHIFT,
			       (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
				OCRDMA_MBX_RSP_SUBSYS_SHIFT);
		}
		status = ocrdma_get_mbx_cqe_errno(cqe_status);
		goto mbx_err;
	}
	/* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
	if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
		status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
mbx_err:
	mutex_unlock(&dev->mqe_ctx.lock);
	return status;
}

static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
				 void *payload_va)
{
	int status = 0;
	struct ocrdma_mbx_rsp *rsp = payload_va;

	if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
				OCRDMA_MQE_HDR_EMB_SHIFT)
		BUG();

	status = ocrdma_mbx_cmd(dev, mqe);
	if (!status)
		/* For non embedded, only CQE failures are handled in
		 * ocrdma_mbx_cmd. We need to check for RSP errors.
		 */
		if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
			status = ocrdma_get_mbx_errno(rsp->status);

	if (status)
		pr_err("opcode=0x%x, subsystem=0x%x\n",
		       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
			OCRDMA_MBX_RSP_OPCODE_SHIFT,
		       (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
			OCRDMA_MBX_RSP_SUBSYS_SHIFT);
	return status;
}

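/* Parse the QUERY_CONFIG mailbox response into the driver's cached
 * device attributes (resource limits, SGE depths, WQE/RQE strides and
 * the derived maximum inline data size).
 */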
static void ocrdma_get_attr(struct ocrdma_dev *dev,
			    struct ocrdma_dev_attr *attr,
			    struct ocrdma_mbx_query_config *rsp)
{
	attr->max_pd =
		(rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
	attr->max_dpp_pds =
		(rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
	attr->max_qp =
		(rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
	attr->max_srq =
		(rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
	attr->max_send_sge = ((rsp->max_write_send_sge &
			       OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
	attr->max_recv_sge = (rsp->max_write_send_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
	attr->max_srq_sge = (rsp->max_srq_rqe_sge &
			     OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
	attr->max_rdma_sge = (rsp->max_write_send_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
	attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
	attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
				    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
		OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
	attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
			       OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
		OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
	attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
				    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
		OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
	attr->max_mw = rsp->max_mw;
	attr->max_mr = rsp->max_mr;
	attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
			      rsp->max_mr_size_lo;
	attr->max_fmr = 0;
	attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
	attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
	attr->max_cqe = rsp->max_cq_cqes_per_cq &
			OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
	attr->max_cq = (rsp->max_cq_cqes_per_cq &
			OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
			OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
	attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
			  OCRDMA_WQE_STRIDE;
	attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
			  OCRDMA_WQE_STRIDE;
	attr->max_inline_data =
		attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
				  sizeof(struct ocrdma_sge));
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		attr->ird = 1;
		attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
		attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
	}
	dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
		OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
	dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
		OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
}

static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
				  struct ocrdma_fw_conf_rsp *conf)
{
	u32 fn_mode;

	fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
	if (fn_mode != OCRDMA_FN_MODE_RDMA)
		return -EINVAL;
	dev->base_eqid = conf->base_eqid;
	dev->max_eq = conf->max_eq;
	return 0;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_ver_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_VER,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_fw_ver_rsp *)cmd;
	memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
	memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
	       sizeof(rsp->running_ver));
	ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
mbx_err:
	kfree(cmd);
	return status;
}

/* can be issued only during init time. */
static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_conf_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_CONFIG,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_fw_conf_rsp *)cmd;
	status = ocrdma_check_fw_config(dev, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
{
	struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
	struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
	struct ocrdma_rdma_stats_resp *old_stats;
	int status;

	old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL);
	if (old_stats == NULL)
		return -ENOMEM;

	memset(mqe, 0, sizeof(*mqe));
	mqe->hdr.pyld_len = dev->stats_mem.size;
	mqe->hdr.spcl_sge_cnt_emb |=
			(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
				OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
	mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;

	/* Cache the old stats */
	memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
	memset(req, 0, dev->stats_mem.size);

	ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
			OCRDMA_CMD_GET_RDMA_STATS,
			OCRDMA_SUBSYS_ROCE,
			dev->stats_mem.size);
	if (reset)
		req->reset_stats = reset;

	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
	if (status)
		/* Copy from cache, if mbox fails */
		memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
	else
		ocrdma_le32_to_cpu(req, dev->stats_mem.size);

	kfree(old_stats);
	return status;
}

static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_dma_mem dma;
	struct ocrdma_mqe *mqe;
	struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
	struct mgmt_hba_attribs *hba_attribs;

	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
	if (!mqe)
		return status;

	dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
	dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
				    dma.size, &dma.pa, GFP_KERNEL);
	if (!dma.va)
		goto free_mqe;

	mqe->hdr.pyld_len = dma.size;
	mqe->hdr.spcl_sge_cnt_emb |=
			(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
			OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
	mqe->u.nonemb_req.sge[0].len = dma.size;

	memset(dma.va, 0, dma.size);
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
			OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
			OCRDMA_SUBSYS_COMMON,
			dma.size);

	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
	if (!status) {
		ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
		hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;

		dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
					OCRDMA_HBA_ATTRB_PTNUM_MASK)
					>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
		strncpy(dev->model_number,
			hba_attribs->controller_model_number, 31);
	}
	dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
free_mqe:
	kfree(mqe);
	return status;
}

static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mbx_query_config *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
	if (!cmd)
		return status;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_mbx_query_config *)cmd;
	ocrdma_get_attr(dev, &dev->attr, rsp);
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
{
	int status = -ENOMEM;
	struct ocrdma_get_link_speed_rsp *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
				  sizeof(*cmd));
	if (!cmd)
		return status;
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
	*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
			>> OCRDMA_PHY_PS_SHIFT;

mbx_err:
	kfree(cmd);
	return status;
}

static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_get_phy_info_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
	if (!cmd)
		return status;

	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
			sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
	dev->phy.phy_type =
			(rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
	dev->phy.interface_type =
			(rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
				>> OCRDMA_IF_TYPE_SHIFT;
	dev->phy.auto_speeds_supported =
			(rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
	dev->phy.fixed_speeds_supported =
			(rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
				>> OCRDMA_FSPEED_SUPP_SHIFT;
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_alloc_pd *cmd;
	struct ocrdma_alloc_pd_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	if (pd->dpp_enabled)
		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
	pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
	if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
		pd->dpp_enabled = true;
		pd->dpp_page = rsp->dpp_page_pdid >>
				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
	} else {
		pd->dpp_enabled = false;
		pd->num_dpp_qp = 0;
	}
mbx_err:
	kfree(cmd);
	return status;
}

int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	struct ocrdma_dealloc_pd *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = pd->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}

static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
{
	int status = -ENOMEM;
	size_t pd_bitmap_size;
	struct ocrdma_alloc_pd_range *cmd;
	struct ocrdma_alloc_pd_range_rsp *rsp;

	/* Pre allocate the DPP PDs */
	if (dev->attr.max_dpp_pds) {
		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
					  sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;
		cmd->pd_count = dev->attr.max_dpp_pds;
		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
		status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
		rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;

		if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
		    rsp->pd_count) {
			dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
					OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
			dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
					OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
			dev->pd_mgr->max_dpp_pd = rsp->pd_count;
			pd_bitmap_size =
				BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
			dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
							     GFP_KERNEL);
		}
		kfree(cmd);
	}

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
	if (!status && rsp->pd_count) {
		dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
					OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
		dev->pd_mgr->max_normal_pd = rsp->pd_count;
		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
		dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
						      GFP_KERNEL);
	}
	kfree(cmd);

	if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
		/* Enable PD resource manager */
		dev->pd_mgr->pd_prealloc_valid = true;
		return 0;
	}
	return status;
}

static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
{
	struct ocrdma_dealloc_pd_range *cmd;

	/* return normal PDs to firmware */
	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
	if (!cmd)
		goto mbx_err;

	if (dev->pd_mgr->max_normal_pd) {
		cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
		cmd->pd_count = dev->pd_mgr->max_normal_pd;
		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	}

	if (dev->pd_mgr->max_dpp_pd) {
		kfree(cmd);
		/* return DPP PDs to firmware */
		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
					  sizeof(*cmd));
		if (!cmd)
			goto mbx_err;

		cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
		cmd->pd_count = dev->pd_mgr->max_dpp_pd;
		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	}
mbx_err:
	kfree(cmd);
}

void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
{
	int status;

	dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
			      GFP_KERNEL);
	if (!dev->pd_mgr) {
		pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
		return;
	}
	status = ocrdma_mbx_alloc_pd_range(dev);
	if (status) {
		pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
		       __func__, dev->id);
	}
}

static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
{
	ocrdma_mbx_dealloc_pd_range(dev);
	kfree(dev->pd_mgr->pd_norm_bitmap);
	kfree(dev->pd_mgr->pd_dpp_bitmap);
	kfree(dev->pd_mgr);
}

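/* Given a requested entry count and entry size, pick the smallest
 * hardware queue page size that can hold the ring and report the
 * resulting page count, page size and (possibly rounded-up) entry count.
 */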
Parav Panditfe2caef2012-03-21 04:09:06 +05301549static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
1550 int *num_pages, int *page_size)
1551{
1552 int i;
1553 int mem_size;
1554
1555 *num_entries = roundup_pow_of_two(*num_entries);
1556 mem_size = *num_entries * entry_size;
1557	/* find the lowest possible multiplier */
1558 for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1559 if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
1560 break;
1561 }
1562 if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
1563 return -EINVAL;
1564 mem_size = roundup(mem_size,
1565 ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
1566 *num_pages =
1567 mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1568 *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1569 *num_entries = mem_size / entry_size;
1570 return 0;
1571}
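
/*
 * Illustrative walk-through of ocrdma_build_q_conf(); the sizes below are
 * assumed for the example only, not taken from the driver headers.  With a
 * base page chunk of 32K, a page-count cap of 8 and a request for 1000
 * entries of 64 bytes each, the entry count is first rounded up to 1024
 * (64K of memory).  The loop then picks the smallest i with
 * 32K << i >= 64K, i.e. i = 1, so the per-page size becomes
 * (32K << 1) / 8 = 8K and the queue is laid out as eight 8K pages,
 * giving the caller back 1024 usable entries.
 */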
1572
1573static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
1574{
Devesh Sharmafad51b72014-02-04 11:57:10 +05301575 int i;
Parav Panditfe2caef2012-03-21 04:09:06 +05301576 int status = 0;
1577 int max_ah;
1578 struct ocrdma_create_ah_tbl *cmd;
1579 struct ocrdma_create_ah_tbl_rsp *rsp;
1580 struct pci_dev *pdev = dev->nic_info.pdev;
1581 dma_addr_t pa;
1582 struct ocrdma_pbe *pbes;
1583
1584 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
1585 if (!cmd)
1586 return status;
1587
1588 max_ah = OCRDMA_MAX_AH;
1589 dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;
1590
1591 /* number of PBEs in PBL */
1592 cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
1593 OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
1594 OCRDMA_CREATE_AH_NUM_PAGES_MASK;
1595
1596 /* page size */
1597 for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1598 if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
1599 break;
1600 }
1601 cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
1602 OCRDMA_CREATE_AH_PAGE_SIZE_MASK;
1603
1604 /* ah_entry size */
1605 cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
1606 OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
1607 OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;
1608
1609 dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
1610 &dev->av_tbl.pbl.pa,
1611 GFP_KERNEL);
1612 if (dev->av_tbl.pbl.va == NULL)
1613 goto mem_err;
1614
1615 dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
1616 &pa, GFP_KERNEL);
1617 if (dev->av_tbl.va == NULL)
1618 goto mem_err_ah;
1619 dev->av_tbl.pa = pa;
1620 dev->av_tbl.num_ah = max_ah;
1621 memset(dev->av_tbl.va, 0, dev->av_tbl.size);
1622
1623 pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
1624 for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
Devesh Sharma8ac0c7c2014-07-02 11:36:05 +05301625 pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
1626 pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
Parav Panditfe2caef2012-03-21 04:09:06 +05301627 pa += PAGE_SIZE;
1628 }
1629 cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
1630 cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
1631 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1632 if (status)
1633 goto mbx_err;
1634 rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
1635 dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
1636 kfree(cmd);
1637 return 0;
1638
1639mbx_err:
1640 dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1641 dev->av_tbl.pa);
1642 dev->av_tbl.va = NULL;
1643mem_err_ah:
1644 dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1645 dev->av_tbl.pbl.pa);
1646 dev->av_tbl.pbl.va = NULL;
1647 dev->av_tbl.size = 0;
1648mem_err:
1649 kfree(cmd);
1650 return status;
1651}
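
/*
 * Layout built above: the AH table is one coherent allocation, and a single
 * PAGE_SIZE PBL page holds one PBE per page of that table, each PBE
 * carrying the page's DMA address as little-endian lo/hi words.  Only the
 * PBL's own address is handed to the firmware through cmd->tbl_addr[0].
 */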
1652
1653static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
1654{
1655 struct ocrdma_delete_ah_tbl *cmd;
1656 struct pci_dev *pdev = dev->nic_info.pdev;
1657
1658 if (dev->av_tbl.va == NULL)
1659 return;
1660
1661 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
1662 if (!cmd)
1663 return;
1664 cmd->ahid = dev->av_tbl.ahid;
1665
1666 ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1667 dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1668 dev->av_tbl.pa);
Devesh Sharmadaac9682014-06-10 19:32:18 +05301669 dev->av_tbl.va = NULL;
Parav Panditfe2caef2012-03-21 04:09:06 +05301670 dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1671 dev->av_tbl.pbl.pa);
1672 kfree(cmd);
1673}
1674
1675/* Multiple CQs share an EQ. This routine returns the least used
1676 * EQ to associate with a CQ. This distributes the interrupt
1677 * processing and CPU load across EQs, vectors and hence CPUs.
1678 */
1679static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
1680{
1681 int i, selected_eq = 0, cq_cnt = 0;
1682 u16 eq_id;
1683
1684 mutex_lock(&dev->dev_lock);
Naresh Gottumukkalac88bd032013-08-26 15:27:41 +05301685 cq_cnt = dev->eq_tbl[0].cq_cnt;
1686 eq_id = dev->eq_tbl[0].q.id;
1687	/* find the EQ which has the least number of
1688	 * CQs associated with it.
1689	 */
1690 for (i = 0; i < dev->eq_cnt; i++) {
Naresh Gottumukkalac88bd032013-08-26 15:27:41 +05301691 if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
1692 cq_cnt = dev->eq_tbl[i].cq_cnt;
1693 eq_id = dev->eq_tbl[i].q.id;
Parav Panditfe2caef2012-03-21 04:09:06 +05301694 selected_eq = i;
1695 }
1696 }
Naresh Gottumukkalac88bd032013-08-26 15:27:41 +05301697 dev->eq_tbl[selected_eq].cq_cnt += 1;
Parav Panditfe2caef2012-03-21 04:09:06 +05301698 mutex_unlock(&dev->dev_lock);
1699 return eq_id;
1700}
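
/*
 * Example of the selection above (counts assumed for illustration): with
 * three EQs whose cq_cnt values are {4, 2, 5}, the scan settles on index 1,
 * the new CQ is bound to that EQ and its cq_cnt becomes 3.
 * ocrdma_unbind_eq() drops the count again when the CQ is destroyed.
 */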
1701
1702static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
1703{
1704 int i;
1705
1706 mutex_lock(&dev->dev_lock);
Devesh Sharmaea6176262014-02-04 11:56:54 +05301707 i = ocrdma_get_eq_table_index(dev, eq_id);
1708 if (i == -EINVAL)
1709 BUG();
1710 dev->eq_tbl[i].cq_cnt -= 1;
Parav Panditfe2caef2012-03-21 04:09:06 +05301711 mutex_unlock(&dev->dev_lock);
1712}
1713
1714int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
Naresh Gottumukkalacffce992013-08-26 15:27:44 +05301715 int entries, int dpp_cq, u16 pd_id)
Parav Panditfe2caef2012-03-21 04:09:06 +05301716{
1717	int status = -ENOMEM;
	int max_hw_cqe;
1718 struct pci_dev *pdev = dev->nic_info.pdev;
1719 struct ocrdma_create_cq *cmd;
1720 struct ocrdma_create_cq_rsp *rsp;
1721 u32 hw_pages, cqe_size, page_size, cqe_count;
1722
Parav Panditfe2caef2012-03-21 04:09:06 +05301723 if (entries > dev->attr.max_cqe) {
1724		pr_err("%s(%d) max_cqe=0x%x, requested_cqe=0x%x\n",
1725 __func__, dev->id, dev->attr.max_cqe, entries);
Parav Panditfe2caef2012-03-21 04:09:06 +05301726 return -EINVAL;
1727 }
Devesh Sharma21c33912014-02-04 11:56:56 +05301728 if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
Parav Panditfe2caef2012-03-21 04:09:06 +05301729 return -EINVAL;
1730
1731 if (dpp_cq) {
1732 cq->max_hw_cqe = 1;
1733 max_hw_cqe = 1;
1734 cqe_size = OCRDMA_DPP_CQE_SIZE;
1735 hw_pages = 1;
1736 } else {
1737 cq->max_hw_cqe = dev->attr.max_cqe;
1738 max_hw_cqe = dev->attr.max_cqe;
1739 cqe_size = sizeof(struct ocrdma_cqe);
1740 hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
1741 }
1742
1743 cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);
1744
1745 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
1746 if (!cmd)
1747 return -ENOMEM;
1748 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
1749 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1750 cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
1751 if (!cq->va) {
1752 status = -ENOMEM;
1753 goto mem_err;
1754 }
1755 memset(cq->va, 0, cq->len);
1756 page_size = cq->len / hw_pages;
1757 cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
1758 OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
1759 cmd->cmd.pgsz_pgcnt |= hw_pages;
1760 cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
1761
Parav Panditfe2caef2012-03-21 04:09:06 +05301762 cq->eqn = ocrdma_bind_eq(dev);
Naresh Gottumukkalacffce992013-08-26 15:27:44 +05301763 cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
Parav Panditfe2caef2012-03-21 04:09:06 +05301764 cqe_count = cq->len / cqe_size;
Devesh Sharmaea6176262014-02-04 11:56:54 +05301765 cq->cqe_cnt = cqe_count;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301766 if (cqe_count > 1024) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301767 /* Set cnt to 3 to indicate more than 1024 cq entries */
1768 cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301769 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301770 u8 count = 0;
1771 switch (cqe_count) {
1772 case 256:
1773 count = 0;
1774 break;
1775 case 512:
1776 count = 1;
1777 break;
1778 case 1024:
1779 count = 2;
1780 break;
1781 default:
1782 goto mbx_err;
1783 }
1784 cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
1785 }
1786 /* shared eq between all the consumer cqs. */
1787 cmd->cmd.eqn = cq->eqn;
Devesh Sharma21c33912014-02-04 11:56:56 +05301788 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301789 if (dpp_cq)
1790 cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
1791 OCRDMA_CREATE_CQ_TYPE_SHIFT;
1792 cq->phase_change = false;
Devesh Sharma8ac0c7c2014-07-02 11:36:05 +05301793 cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
Parav Panditfe2caef2012-03-21 04:09:06 +05301794 } else {
Devesh Sharma8ac0c7c2014-07-02 11:36:05 +05301795 cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
Parav Panditfe2caef2012-03-21 04:09:06 +05301796 cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
1797 cq->phase_change = true;
1798 }
1799
Devesh Sharma8ac0c7c2014-07-02 11:36:05 +05301800 /* pd_id valid only for v3 */
1801 cmd->cmd.pdid_cqecnt |= (pd_id <<
1802 OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05301803 ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
1804 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1805 if (status)
1806 goto mbx_err;
1807
1808 rsp = (struct ocrdma_create_cq_rsp *)cmd;
1809 cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
1810 kfree(cmd);
1811 return 0;
1812mbx_err:
1813 ocrdma_unbind_eq(dev, cq->eqn);
Parav Panditfe2caef2012-03-21 04:09:06 +05301814 dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
1815mem_err:
1816 kfree(cmd);
1817 return status;
1818}
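
/*
 * Note on the CQE-count encoding used above: the two-bit field at
 * OCRDMA_CREATE_CQ_CNT_SHIFT encodes 256, 512 and 1024 entries as 0, 1 and
 * 2, while 3 flags a larger (power-of-two) depth; the exact value also
 * travels in cmd->cmd.pdid_cqecnt, which additionally carries the pd_id for
 * the v3 command format.
 */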
1819
1820int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
1821{
1822 int status = -ENOMEM;
1823 struct ocrdma_destroy_cq *cmd;
1824
1825 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
1826 if (!cmd)
1827 return status;
1828 ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
1829 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1830
1831 cmd->bypass_flush_qid |=
1832 (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
1833 OCRDMA_DESTROY_CQ_QID_MASK;
1834
Parav Panditfe2caef2012-03-21 04:09:06 +05301835 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Devesh Sharmaea6176262014-02-04 11:56:54 +05301836 ocrdma_unbind_eq(dev, cq->eqn);
Parav Panditfe2caef2012-03-21 04:09:06 +05301837 dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
Parav Panditfe2caef2012-03-21 04:09:06 +05301838 kfree(cmd);
1839 return status;
1840}
1841
1842int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1843 u32 pdid, int addr_check)
1844{
1845 int status = -ENOMEM;
1846 struct ocrdma_alloc_lkey *cmd;
1847 struct ocrdma_alloc_lkey_rsp *rsp;
1848
1849 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
1850 if (!cmd)
1851 return status;
1852 cmd->pdid = pdid;
1853 cmd->pbl_sz_flags |= addr_check;
1854 cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
1855 cmd->pbl_sz_flags |=
1856 (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
1857 cmd->pbl_sz_flags |=
1858 (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
1859 cmd->pbl_sz_flags |=
1860 (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
1861 cmd->pbl_sz_flags |=
1862 (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
1863 cmd->pbl_sz_flags |=
1864 (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);
1865
1866 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1867 if (status)
1868 goto mbx_err;
1869 rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
1870 hwmr->lkey = rsp->lrkey;
1871mbx_err:
1872 kfree(cmd);
1873 return status;
1874}
1875
1876int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
1877{
1878 int status = -ENOMEM;
1879 struct ocrdma_dealloc_lkey *cmd;
1880
1881 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
1882 if (!cmd)
1883 return -ENOMEM;
1884 cmd->lkey = lkey;
1885 cmd->rsvd_frmr = fr_mr ? 1 : 0;
1886 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1887 if (status)
1888 goto mbx_err;
1889mbx_err:
1890 kfree(cmd);
1891 return status;
1892}
1893
1894static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1895 u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
1896{
1897 int status = -ENOMEM;
1898 int i;
1899 struct ocrdma_reg_nsmr *cmd;
1900 struct ocrdma_reg_nsmr_rsp *rsp;
1901
1902 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
1903 if (!cmd)
1904 return -ENOMEM;
1905 cmd->num_pbl_pdid =
1906 pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05301907 cmd->fr_mr = hwmr->fr_mr;
Parav Panditfe2caef2012-03-21 04:09:06 +05301908
1909 cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
1910 OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
1911 cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
1912 OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
1913 cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
1914 OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
1915 cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
1916 OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
1917 cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
1918 OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
1919 cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);
1920
1921 cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
1922 cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
1923 OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
1924 cmd->totlen_low = hwmr->len;
1925 cmd->totlen_high = upper_32_bits(hwmr->len);
1926 cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
1927 cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
1928 cmd->va_loaddr = (u32) hwmr->va;
1929 cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
1930
1931 for (i = 0; i < pbl_cnt; i++) {
1932 cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
1933 cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
1934 }
1935 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1936 if (status)
1937 goto mbx_err;
1938 rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
1939 hwmr->lkey = rsp->lrkey;
1940mbx_err:
1941 kfree(cmd);
1942 return status;
1943}
1944
1945static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
1946 struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
1947 u32 pbl_offset, u32 last)
1948{
1949 int status = -ENOMEM;
1950 int i;
1951 struct ocrdma_reg_nsmr_cont *cmd;
1952
1953 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
1954 if (!cmd)
1955 return -ENOMEM;
1956 cmd->lrkey = hwmr->lkey;
1957 cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
1958 (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
1959 cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;
1960
1961 for (i = 0; i < pbl_cnt; i++) {
1962 cmd->pbl[i].lo =
1963 (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
1964 cmd->pbl[i].hi =
1965 upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
1966 }
1967 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1968 if (status)
1969 goto mbx_err;
1970mbx_err:
1971 kfree(cmd);
1972 return status;
1973}
1974
1975int ocrdma_reg_mr(struct ocrdma_dev *dev,
1976 struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
1977{
1978 int status;
1979 u32 last = 0;
1980 u32 cur_pbl_cnt, pbl_offset;
1981 u32 pending_pbl_cnt = hwmr->num_pbls;
1982
1983 pbl_offset = 0;
1984 cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
1985 if (cur_pbl_cnt == pending_pbl_cnt)
1986 last = 1;
1987
1988 status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
1989 cur_pbl_cnt, hwmr->pbe_size, last);
1990 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001991 pr_err("%s() status=%d\n", __func__, status);
Parav Panditfe2caef2012-03-21 04:09:06 +05301992 return status;
1993 }
1994	/* if there are no more PBLs to register then exit. */
1995 if (last)
1996 return 0;
1997
1998 while (!last) {
1999 pbl_offset += cur_pbl_cnt;
2000 pending_pbl_cnt -= cur_pbl_cnt;
2001 cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
2002		/* if we reach the end of the PBLs, we need to set the last
2003		 * bit, indicating no more PBLs to register for this memory key.
2004		 */
2005 if (cur_pbl_cnt == pending_pbl_cnt)
2006 last = 1;
2007
2008 status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
2009 pbl_offset, last);
2010 if (status)
2011 break;
2012 }
2013 if (status)
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002014 pr_err("%s() err. status=%d\n", __func__, status);
Parav Panditfe2caef2012-03-21 04:09:06 +05302015
2016 return status;
2017}
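
/*
 * Example of the chunked registration above, assuming for illustration that
 * MAX_OCRDMA_NSMR_PBL is 32: an MR with 70 PBLs is registered as 32 PBLs
 * via ocrdma_mbx_reg_mr(), then 32 and finally 6 via
 * ocrdma_mbx_reg_mr_cont(), with 'last' set only on the final chunk so the
 * firmware knows the PBL list for this lkey is complete.
 */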
2018
2019bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
2020{
2021 struct ocrdma_qp *tmp;
2022 bool found = false;
2023 list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
2024 if (qp == tmp) {
2025 found = true;
2026 break;
2027 }
2028 }
2029 return found;
2030}
2031
2032bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
2033{
2034 struct ocrdma_qp *tmp;
2035 bool found = false;
2036 list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
2037 if (qp == tmp) {
2038 found = true;
2039 break;
2040 }
2041 }
2042 return found;
2043}
2044
2045void ocrdma_flush_qp(struct ocrdma_qp *qp)
2046{
2047 bool found;
2048 unsigned long flags;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302049 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302050
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302051 spin_lock_irqsave(&dev->flush_q_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05302052 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
2053 if (!found)
2054 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
2055 if (!qp->srq) {
2056 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
2057 if (!found)
2058 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
2059 }
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302060 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05302061}
2062
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05302063static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
2064{
2065 qp->sq.head = 0;
2066 qp->sq.tail = 0;
2067 qp->rq.head = 0;
2068 qp->rq.tail = 0;
2069}
2070
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302071int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
2072 enum ib_qp_state *old_ib_state)
Parav Panditfe2caef2012-03-21 04:09:06 +05302073{
2074 unsigned long flags;
2075 int status = 0;
2076 enum ocrdma_qp_state new_state;
2077 new_state = get_ocrdma_qp_state(new_ib_state);
2078
2079 /* sync with wqe and rqe posting */
2080 spin_lock_irqsave(&qp->q_lock, flags);
2081
2082 if (old_ib_state)
2083 *old_ib_state = get_ibqp_state(qp->state);
2084 if (new_state == qp->state) {
2085 spin_unlock_irqrestore(&qp->q_lock, flags);
2086 return 1;
2087 }
2088
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302089
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05302090 if (new_state == OCRDMA_QPS_INIT) {
2091 ocrdma_init_hwq_ptr(qp);
2092 ocrdma_del_flush_qp(qp);
2093 } else if (new_state == OCRDMA_QPS_ERR) {
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302094 ocrdma_flush_qp(qp);
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05302095 }
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302096
2097 qp->state = new_state;
Parav Panditfe2caef2012-03-21 04:09:06 +05302098
2099 spin_unlock_irqrestore(&qp->q_lock, flags);
2100 return status;
2101}
2102
2103static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
2104{
2105 u32 flags = 0;
2106 if (qp->cap_flags & OCRDMA_QP_INB_RD)
2107 flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
2108 if (qp->cap_flags & OCRDMA_QP_INB_WR)
2109 flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
2110 if (qp->cap_flags & OCRDMA_QP_MW_BIND)
2111 flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
2112 if (qp->cap_flags & OCRDMA_QP_LKEY0)
2113 flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
2114 if (qp->cap_flags & OCRDMA_QP_FAST_REG)
2115 flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
2116 return flags;
2117}
2118
2119static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
2120 struct ib_qp_init_attr *attrs,
2121 struct ocrdma_qp *qp)
2122{
2123 int status;
2124 u32 len, hw_pages, hw_page_size;
2125 dma_addr_t pa;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302126 struct ocrdma_pd *pd = qp->pd;
2127 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302128 struct pci_dev *pdev = dev->nic_info.pdev;
2129 u32 max_wqe_allocated;
2130 u32 max_sges = attrs->cap.max_send_sge;
2131
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302132 /* QP1 may exceed 127 */
Dan Carpenter6ebacdf2013-09-06 11:50:46 +03002133 max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302134 dev->attr.max_wqe);
Parav Panditfe2caef2012-03-21 04:09:06 +05302135
2136 status = ocrdma_build_q_conf(&max_wqe_allocated,
2137 dev->attr.wqe_size, &hw_pages, &hw_page_size);
2138 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002139 pr_err("%s() req. max_send_wr=0x%x\n", __func__,
2140 max_wqe_allocated);
Parav Panditfe2caef2012-03-21 04:09:06 +05302141 return -EINVAL;
2142 }
2143 qp->sq.max_cnt = max_wqe_allocated;
2144 len = (hw_pages * hw_page_size);
2145
2146 qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2147 if (!qp->sq.va)
2148 return -EINVAL;
2149 memset(qp->sq.va, 0, len);
2150 qp->sq.len = len;
2151 qp->sq.pa = pa;
2152 qp->sq.entry_size = dev->attr.wqe_size;
2153 ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);
2154
2155 cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2156 << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
2157 cmd->num_wq_rq_pages |= (hw_pages <<
2158 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
2159 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
2160 cmd->max_sge_send_write |= (max_sges <<
2161 OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
2162 OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
2163 cmd->max_sge_send_write |= (max_sges <<
2164 OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
2165 OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
2166 cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
2167 OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
2168 OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
2169 cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
2170 OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
2171 OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
2172 return 0;
2173}
2174
2175static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
2176 struct ib_qp_init_attr *attrs,
2177 struct ocrdma_qp *qp)
2178{
2179 int status;
2180 u32 len, hw_pages, hw_page_size;
2181 dma_addr_t pa = 0;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302182 struct ocrdma_pd *pd = qp->pd;
2183 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302184 struct pci_dev *pdev = dev->nic_info.pdev;
2185 u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
2186
2187 status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
2188 &hw_pages, &hw_page_size);
2189 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002190 pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
2191 attrs->cap.max_recv_wr + 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05302192 return status;
2193 }
2194 qp->rq.max_cnt = max_rqe_allocated;
2195 len = (hw_pages * hw_page_size);
2196
2197 qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2198 if (!qp->rq.va)
Wei Yongjunc94e15c2013-06-23 09:07:19 +08002199 return -ENOMEM;
Parav Panditfe2caef2012-03-21 04:09:06 +05302200 memset(qp->rq.va, 0, len);
2201 qp->rq.pa = pa;
2202 qp->rq.len = len;
2203 qp->rq.entry_size = dev->attr.rqe_size;
2204
2205 ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2206 cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
2207 OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
2208 cmd->num_wq_rq_pages |=
2209 (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
2210 OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
2211 cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
2212 OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
2213 OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
2214 cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
2215 OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
2216 OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
2217 cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
2218 OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
2219 OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
2220 return 0;
2221}
2222
2223static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
2224 struct ocrdma_pd *pd,
2225 struct ocrdma_qp *qp,
2226 u8 enable_dpp_cq, u16 dpp_cq_id)
2227{
2228 pd->num_dpp_qp--;
2229 qp->dpp_enabled = true;
2230 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
2231 if (!enable_dpp_cq)
2232 return;
2233 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
2234 cmd->dpp_credits_cqid = dpp_cq_id;
2235 cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
2236 OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
2237}
2238
2239static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
2240 struct ocrdma_qp *qp)
2241{
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302242 struct ocrdma_pd *pd = qp->pd;
2243 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302244 struct pci_dev *pdev = dev->nic_info.pdev;
2245 dma_addr_t pa = 0;
2246 int ird_page_size = dev->attr.ird_page_size;
2247 int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302248 struct ocrdma_hdr_wqe *rqe;
2249 int i = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302250
2251 if (dev->attr.ird == 0)
2252 return 0;
2253
2254 qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
2255 &pa, GFP_KERNEL);
2256 if (!qp->ird_q_va)
2257 return -ENOMEM;
2258 memset(qp->ird_q_va, 0, ird_q_len);
2259 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
2260 pa, ird_page_size);
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302261 for (; i < ird_q_len / dev->attr.rqe_size; i++) {
2262 rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
2263 (i * dev->attr.rqe_size));
2264 rqe->cw = 0;
2265 rqe->cw |= 2;
2266 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2267 rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
2268 rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
2269 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302270 return 0;
2271}
2272
2273static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
2274 struct ocrdma_qp *qp,
2275 struct ib_qp_init_attr *attrs,
2276 u16 *dpp_offset, u16 *dpp_credit_lmt)
2277{
2278 u32 max_wqe_allocated, max_rqe_allocated;
2279 qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
2280 qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
2281 qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
2282 qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
2283 qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
2284 qp->dpp_enabled = false;
2285 if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
2286 qp->dpp_enabled = true;
2287 *dpp_credit_lmt = (rsp->dpp_response &
2288 OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
2289 OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
2290 *dpp_offset = (rsp->dpp_response &
2291 OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
2292 OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
2293 }
2294 max_wqe_allocated =
2295 rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
2296 max_wqe_allocated = 1 << max_wqe_allocated;
2297 max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
2298
Parav Panditfe2caef2012-03-21 04:09:06 +05302299 qp->sq.max_cnt = max_wqe_allocated;
2300 qp->sq.max_wqe_idx = max_wqe_allocated - 1;
2301
2302 if (!attrs->srq) {
2303 qp->rq.max_cnt = max_rqe_allocated;
2304 qp->rq.max_wqe_idx = max_rqe_allocated - 1;
Parav Panditfe2caef2012-03-21 04:09:06 +05302305 }
2306}
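
/*
 * The firmware reports the allocated SQ and RQ depths log2-encoded in
 * rsp->max_wqe_rqe, which is why the helper above shifts 1 left by the
 * extracted values before filling in max_cnt and max_wqe_idx.
 */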
2307
2308int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
2309 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
2310 u16 *dpp_credit_lmt)
2311{
2312 int status = -ENOMEM;
2313 u32 flags = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302314 struct ocrdma_pd *pd = qp->pd;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302315 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302316 struct pci_dev *pdev = dev->nic_info.pdev;
2317 struct ocrdma_cq *cq;
2318 struct ocrdma_create_qp_req *cmd;
2319 struct ocrdma_create_qp_rsp *rsp;
2320 int qptype;
2321
2322 switch (attrs->qp_type) {
2323 case IB_QPT_GSI:
2324 qptype = OCRDMA_QPT_GSI;
2325 break;
2326 case IB_QPT_RC:
2327 qptype = OCRDMA_QPT_RC;
2328 break;
2329 case IB_QPT_UD:
2330 qptype = OCRDMA_QPT_UD;
2331 break;
2332 default:
2333 return -EINVAL;
Joe Perches2b50176d2013-10-08 16:07:22 -07002334 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302335
2336 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
2337 if (!cmd)
2338 return status;
2339 cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
2340 OCRDMA_CREATE_QP_REQ_QPT_MASK;
2341 status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
2342 if (status)
2343 goto sq_err;
2344
2345 if (attrs->srq) {
2346 struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
2347 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
2348 cmd->rq_addr[0].lo = srq->id;
2349 qp->srq = srq;
2350 } else {
2351 status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
2352 if (status)
2353 goto rq_err;
2354 }
2355
2356 status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
2357 if (status)
2358 goto mbx_err;
2359
2360 cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
2361 OCRDMA_CREATE_QP_REQ_PD_ID_MASK;
2362
2363 flags = ocrdma_set_create_qp_mbx_access_flags(qp);
2364
2365 cmd->max_sge_recv_flags |= flags;
2366 cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
2367 OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
2368 OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
2369 cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
2370 OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
2371 OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
2372 cq = get_ocrdma_cq(attrs->send_cq);
2373 cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
2374 OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
2375 qp->sq_cq = cq;
2376 cq = get_ocrdma_cq(attrs->recv_cq);
2377 cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
2378 OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
2379 qp->rq_cq = cq;
2380
Devesh Sharmaf50f31e2014-06-10 19:32:12 +05302381 if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
2382 (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302383 ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
2384 dpp_cq_id);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302385 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302386
2387 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2388 if (status)
2389 goto mbx_err;
2390 rsp = (struct ocrdma_create_qp_rsp *)cmd;
2391 ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
2392 qp->state = OCRDMA_QPS_RST;
2393 kfree(cmd);
2394 return 0;
2395mbx_err:
2396 if (qp->rq.va)
2397 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2398rq_err:
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002399 pr_err("%s(%d) rq_err\n", __func__, dev->id);
Parav Panditfe2caef2012-03-21 04:09:06 +05302400 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2401sq_err:
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002402 pr_err("%s(%d) sq_err\n", __func__, dev->id);
Parav Panditfe2caef2012-03-21 04:09:06 +05302403 kfree(cmd);
2404 return status;
2405}
2406
2407int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2408 struct ocrdma_qp_params *param)
2409{
2410 int status = -ENOMEM;
2411 struct ocrdma_query_qp *cmd;
2412 struct ocrdma_query_qp_rsp *rsp;
2413
Mitesh Ahuja038ab8b2015-05-19 11:32:36 +05302414 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
Parav Panditfe2caef2012-03-21 04:09:06 +05302415 if (!cmd)
2416 return status;
2417 cmd->qp_id = qp->id;
2418 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2419 if (status)
2420 goto mbx_err;
2421 rsp = (struct ocrdma_query_qp_rsp *)cmd;
2422 memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
2423mbx_err:
2424 kfree(cmd);
2425 return status;
2426}
2427
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302428static int ocrdma_set_av_params(struct ocrdma_qp *qp,
Parav Panditfe2caef2012-03-21 04:09:06 +05302429 struct ocrdma_modify_qp *cmd,
Selvin Xavierbf674722014-08-22 16:57:20 +05302430 struct ib_qp_attr *attrs,
2431 int attr_mask)
Parav Panditfe2caef2012-03-21 04:09:06 +05302432{
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302433 int status;
Parav Panditfe2caef2012-03-21 04:09:06 +05302434 struct ib_ah_attr *ah_attr = &attrs->ah_attr;
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302435 union ib_gid sgid, zgid;
Devesh Sharma6f5deab2015-05-19 11:32:35 +05302436 u32 vlan_id = 0xFFFF;
Parav Panditfe2caef2012-03-21 04:09:06 +05302437 u8 mac_addr[6];
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302438 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302439
Parav Panditfe2caef2012-03-21 04:09:06 +05302440 if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302441 return -EINVAL;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302442 if (atomic_cmpxchg(&dev->update_sl, 1, 0))
2443 ocrdma_init_service_level(dev);
Parav Panditfe2caef2012-03-21 04:09:06 +05302444 cmd->params.tclass_sq_psn |=
2445 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
2446 cmd->params.rnt_rc_sl_fl |=
2447 (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05302448 cmd->params.rnt_rc_sl_fl |= (ah_attr->sl << OCRDMA_QP_PARAMS_SL_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05302449 cmd->params.hop_lmt_rq_psn |=
2450 (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
2451 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
2452 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
2453 sizeof(cmd->params.dgid));
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302454 status = ocrdma_query_gid(&dev->ibdev, 1,
Devesh Sharmafad51b72014-02-04 11:57:10 +05302455 ah_attr->grh.sgid_index, &sgid);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302456 if (status)
2457 return status;
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302458
2459 memset(&zgid, 0, sizeof(zgid));
2460 if (!memcmp(&sgid, &zgid, sizeof(zgid)))
2461 return -EINVAL;
2462
Parav Panditfe2caef2012-03-21 04:09:06 +05302463 qp->sgid_idx = ah_attr->grh.sgid_index;
2464 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302465 status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
Devesh Sharmaa601dc72014-12-18 14:13:04 +05302466 if (status)
2467 return status;
Parav Panditfe2caef2012-03-21 04:09:06 +05302468 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2469 (mac_addr[2] << 16) | (mac_addr[3] << 24);
2470 /* convert them to LE format. */
2471 ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
2472 ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
2473 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
Selvin Xavierbf674722014-08-22 16:57:20 +05302474 if (attr_mask & IB_QP_VID) {
2475 vlan_id = attrs->vlan_id;
Devesh Sharma6f5deab2015-05-19 11:32:35 +05302476 } else if (dev->pfc_state) {
2477 vlan_id = 0;
2478 pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
2479 dev->id);
2480 pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
2481 dev->id);
2482 }
2483
2484 if (vlan_id < 0x1000) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302485 cmd->params.vlan_dmac_b4_to_b5 |=
2486 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2487 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
Selvin Xavier31dbdd92014-06-10 19:32:13 +05302488 cmd->params.rnt_rc_sl_fl |=
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302489 (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
Parav Panditfe2caef2012-03-21 04:09:06 +05302490 }
Devesh Sharma6f5deab2015-05-19 11:32:35 +05302491
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302492 return 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302493}
2494
2495static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2496 struct ocrdma_modify_qp *cmd,
Prarit Bhargavabc1b04a2014-02-19 15:05:16 -05002497 struct ib_qp_attr *attrs, int attr_mask)
Parav Panditfe2caef2012-03-21 04:09:06 +05302498{
2499 int status = 0;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302500 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302501
2502 if (attr_mask & IB_QP_PKEY_INDEX) {
2503 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
2504 OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
2505 cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
2506 }
2507 if (attr_mask & IB_QP_QKEY) {
2508 qp->qkey = attrs->qkey;
2509 cmd->params.qkey = attrs->qkey;
2510 cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
2511 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302512 if (attr_mask & IB_QP_AV) {
Selvin Xavierbf674722014-08-22 16:57:20 +05302513 status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302514 if (status)
2515 return status;
2516 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302517 /* set the default mac address for UD, GSI QPs */
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302518 cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
2519 (dev->nic_info.mac_addr[1] << 8) |
2520 (dev->nic_info.mac_addr[2] << 16) |
2521 (dev->nic_info.mac_addr[3] << 24);
2522 cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
2523 (dev->nic_info.mac_addr[5] << 8);
Parav Panditfe2caef2012-03-21 04:09:06 +05302524 }
2525 if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
2526 attrs->en_sqd_async_notify) {
2527 cmd->params.max_sge_recv_flags |=
2528 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
2529 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2530 }
2531 if (attr_mask & IB_QP_DEST_QPN) {
2532 cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
2533 OCRDMA_QP_PARAMS_DEST_QPN_MASK);
2534 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2535 }
2536 if (attr_mask & IB_QP_PATH_MTU) {
Naga Irrinki72d8a012015-05-19 11:32:39 +05302537 if (attrs->path_mtu < IB_MTU_512 ||
Naresh Gottumukkalad3cb6c02013-08-26 15:27:40 +05302538 attrs->path_mtu > IB_MTU_4096) {
Naga Irrinki72d8a012015-05-19 11:32:39 +05302539 pr_err("ocrdma%d: IB MTU %d is not supported\n",
2540 dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
Parav Panditfe2caef2012-03-21 04:09:06 +05302541 status = -EINVAL;
2542 goto pmtu_err;
2543 }
2544 cmd->params.path_mtu_pkey_indx |=
2545 (ib_mtu_enum_to_int(attrs->path_mtu) <<
2546 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
2547 OCRDMA_QP_PARAMS_PATH_MTU_MASK;
2548 cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
2549 }
2550 if (attr_mask & IB_QP_TIMEOUT) {
2551 cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
2552 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
2553 cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
2554 }
2555 if (attr_mask & IB_QP_RETRY_CNT) {
2556 cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
2557 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
2558 OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
2559 cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
2560 }
2561 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2562 cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
2563 OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
2564 OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
2565 cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
2566 }
2567 if (attr_mask & IB_QP_RNR_RETRY) {
2568 cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
2569 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
2570 & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
2571 cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
2572 }
2573 if (attr_mask & IB_QP_SQ_PSN) {
2574 cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
2575 cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
2576 }
2577 if (attr_mask & IB_QP_RQ_PSN) {
2578 cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
2579 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
2580 }
2581 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302582 if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302583 status = -EINVAL;
2584 goto pmtu_err;
2585 }
2586 qp->max_ord = attrs->max_rd_atomic;
2587 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
2588 }
2589 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05302590 if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302591 status = -EINVAL;
2592 goto pmtu_err;
2593 }
2594 qp->max_ird = attrs->max_dest_rd_atomic;
2595 cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
2596 }
2597 cmd->params.max_ord_ird = (qp->max_ord <<
2598 OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
2599 (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
2600pmtu_err:
2601 return status;
2602}
2603
2604int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
Prarit Bhargavabc1b04a2014-02-19 15:05:16 -05002605 struct ib_qp_attr *attrs, int attr_mask)
Parav Panditfe2caef2012-03-21 04:09:06 +05302606{
2607 int status = -ENOMEM;
2608 struct ocrdma_modify_qp *cmd;
Parav Panditfe2caef2012-03-21 04:09:06 +05302609
2610 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
2611 if (!cmd)
2612 return status;
2613
2614 cmd->params.id = qp->id;
2615 cmd->flags = 0;
2616 if (attr_mask & IB_QP_STATE) {
2617 cmd->params.max_sge_recv_flags |=
2618 (get_ocrdma_qp_state(attrs->qp_state) <<
2619 OCRDMA_QP_PARAMS_STATE_SHIFT) &
2620 OCRDMA_QP_PARAMS_STATE_MASK;
2621 cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302622 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302623 cmd->params.max_sge_recv_flags |=
2624 (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
2625 OCRDMA_QP_PARAMS_STATE_MASK;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302626 }
2627
Prarit Bhargavabc1b04a2014-02-19 15:05:16 -05002628 status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
Parav Panditfe2caef2012-03-21 04:09:06 +05302629 if (status)
2630 goto mbx_err;
2631 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2632 if (status)
2633 goto mbx_err;
Roland Dreierc592c422012-04-17 01:18:28 -07002634
Parav Panditfe2caef2012-03-21 04:09:06 +05302635mbx_err:
2636 kfree(cmd);
2637 return status;
2638}
2639
2640int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
2641{
2642 int status = -ENOMEM;
2643 struct ocrdma_destroy_qp *cmd;
Parav Panditfe2caef2012-03-21 04:09:06 +05302644 struct pci_dev *pdev = dev->nic_info.pdev;
2645
2646 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
2647 if (!cmd)
2648 return status;
2649 cmd->qp_id = qp->id;
2650 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2651 if (status)
2652 goto mbx_err;
Roland Dreierc592c422012-04-17 01:18:28 -07002653
Parav Panditfe2caef2012-03-21 04:09:06 +05302654mbx_err:
2655 kfree(cmd);
2656 if (qp->sq.va)
2657 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2658 if (!qp->srq && qp->rq.va)
2659 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2660 if (qp->dpp_enabled)
2661 qp->pd->num_dpp_qp++;
2662 return status;
2663}
2664
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302665int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
Parav Panditfe2caef2012-03-21 04:09:06 +05302666 struct ib_srq_init_attr *srq_attr,
2667 struct ocrdma_pd *pd)
2668{
2669 int status = -ENOMEM;
2670 int hw_pages, hw_page_size;
2671 int len;
2672 struct ocrdma_create_srq_rsp *rsp;
2673 struct ocrdma_create_srq *cmd;
2674 dma_addr_t pa;
Parav Panditfe2caef2012-03-21 04:09:06 +05302675 struct pci_dev *pdev = dev->nic_info.pdev;
2676 u32 max_rqe_allocated;
2677
2678 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
2679 if (!cmd)
2680 return status;
2681
2682 cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
2683 max_rqe_allocated = srq_attr->attr.max_wr + 1;
2684 status = ocrdma_build_q_conf(&max_rqe_allocated,
2685 dev->attr.rqe_size,
2686 &hw_pages, &hw_page_size);
2687 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002688 pr_err("%s() req. max_wr=0x%x\n", __func__,
2689 srq_attr->attr.max_wr);
Parav Panditfe2caef2012-03-21 04:09:06 +05302690 status = -EINVAL;
2691 goto ret;
2692 }
2693 len = hw_pages * hw_page_size;
2694 srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2695 if (!srq->rq.va) {
2696 status = -ENOMEM;
2697 goto ret;
2698 }
2699 ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2700
2701 srq->rq.entry_size = dev->attr.rqe_size;
2702 srq->rq.pa = pa;
2703 srq->rq.len = len;
2704 srq->rq.max_cnt = max_rqe_allocated;
2705
2706 cmd->max_sge_rqe = ilog2(max_rqe_allocated);
2707 cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
2708 OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;
2709
2710 cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2711 << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
2712 cmd->pages_rqe_sz |= (dev->attr.rqe_size
2713 << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
2714 & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
2715 cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;
2716
2717 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2718 if (status)
2719 goto mbx_err;
2720 rsp = (struct ocrdma_create_srq_rsp *)cmd;
2721 srq->id = rsp->id;
2722 srq->rq.dbid = rsp->id;
2723 max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
2724 OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
2725 OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
2726 max_rqe_allocated = (1 << max_rqe_allocated);
2727 srq->rq.max_cnt = max_rqe_allocated;
2728 srq->rq.max_wqe_idx = max_rqe_allocated - 1;
2729 srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
2730 OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
2731 OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
2732 goto ret;
2733mbx_err:
2734 dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
2735ret:
2736 kfree(cmd);
2737 return status;
2738}
2739
2740int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2741{
2742 int status = -ENOMEM;
2743 struct ocrdma_modify_srq *cmd;
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05302744 struct ocrdma_pd *pd = srq->pd;
2745 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302746
Naresh Gottumukkalad7e19c02013-08-26 15:27:51 +05302747 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
Parav Panditfe2caef2012-03-21 04:09:06 +05302748 if (!cmd)
2749 return status;
2750 cmd->id = srq->id;
2751 cmd->limit_max_rqe |= srq_attr->srq_limit <<
2752 OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302753 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302754 kfree(cmd);
2755 return status;
2756}
2757
2758int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2759{
2760 int status = -ENOMEM;
2761 struct ocrdma_query_srq *cmd;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302762 struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
2763
Naresh Gottumukkalad7e19c02013-08-26 15:27:51 +05302764 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
Parav Panditfe2caef2012-03-21 04:09:06 +05302765 if (!cmd)
2766 return status;
2767 cmd->id = srq->rq.dbid;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302768 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302769 if (status == 0) {
2770 struct ocrdma_query_srq_rsp *rsp =
2771 (struct ocrdma_query_srq_rsp *)cmd;
2772 srq_attr->max_sge =
2773 rsp->srq_lmt_max_sge &
2774 OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
2775 srq_attr->max_wr =
2776 rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
2777 srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
2778 OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
2779 }
2780 kfree(cmd);
2781 return status;
2782}
2783
2784int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
2785{
2786 int status = -ENOMEM;
2787 struct ocrdma_destroy_srq *cmd;
2788 struct pci_dev *pdev = dev->nic_info.pdev;
2789 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
2790 if (!cmd)
2791 return status;
2792 cmd->id = srq->id;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302793 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302794 if (srq->rq.va)
2795 dma_free_coherent(&pdev->dev, srq->rq.len,
2796 srq->rq.va, srq->rq.pa);
2797 kfree(cmd);
2798 return status;
2799}
2800
Selvin Xavier31dbdd92014-06-10 19:32:13 +05302801static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
2802 struct ocrdma_dcbx_cfg *dcbxcfg)
2803{
2804 int status = 0;
2805 dma_addr_t pa;
2806 struct ocrdma_mqe cmd;
2807
2808 struct ocrdma_get_dcbx_cfg_req *req = NULL;
2809 struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
2810 struct pci_dev *pdev = dev->nic_info.pdev;
2811 struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;
2812
2813 memset(&cmd, 0, sizeof(struct ocrdma_mqe));
2814	cmd.hdr.pyld_len = max_t(u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
2815 sizeof(struct ocrdma_get_dcbx_cfg_req));
2816 req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
2817 if (!req) {
2818 status = -ENOMEM;
2819 goto mem_err;
2820 }
2821
2822 cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
2823 OCRDMA_MQE_HDR_SGE_CNT_MASK;
2824 mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
2825 mqe_sge->pa_hi = (u32) upper_32_bits(pa);
2826 mqe_sge->len = cmd.hdr.pyld_len;
2827
2828 memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req));
2829 ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
2830 OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
2831 req->param_type = ptype;
2832
2833 status = ocrdma_mbx_cmd(dev, &cmd);
2834 if (status)
2835 goto mbx_err;
2836
2837 rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
2838 ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
2839 memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));
2840
2841mbx_err:
2842 dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
2843mem_err:
2844 return status;
2845}
2846
2847#define OCRDMA_MAX_SERVICE_LEVEL_INDEX 0x08
2848#define OCRDMA_DEFAULT_SERVICE_LEVEL 0x05
2849
2850static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
2851 struct ocrdma_dcbx_cfg *dcbxcfg,
2852 u8 *srvc_lvl)
2853{
2854 int status = -EINVAL, indx, slindx;
2855 int ventry_cnt;
2856 struct ocrdma_app_parameter *app_param;
2857 u8 valid, proto_sel;
2858 u8 app_prio, pfc_prio;
2859 u16 proto;
2860
2861 if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
2862 pr_info("%s ocrdma%d DCBX is disabled\n",
2863 dev_name(&dev->nic_info.pdev->dev), dev->id);
2864 goto out;
2865 }
2866
2867 if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
2868 pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
2869 dev_name(&dev->nic_info.pdev->dev), dev->id,
2870 (ptype > 0 ? "operational" : "admin"),
2871 (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
2872 "enabled" : "disabled",
2873 (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
2874 "" : ", not sync'ed");
2875 goto out;
2876 } else {
2877 pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
2878 dev_name(&dev->nic_info.pdev->dev), dev->id);
2879 }
2880
2881 ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
2882 OCRDMA_DCBX_APP_ENTRY_SHIFT)
2883 & OCRDMA_DCBX_STATE_MASK;
2884
2885 for (indx = 0; indx < ventry_cnt; indx++) {
2886 app_param = &dcbxcfg->app_param[indx];
2887 valid = (app_param->valid_proto_app >>
2888 OCRDMA_APP_PARAM_VALID_SHIFT)
2889 & OCRDMA_APP_PARAM_VALID_MASK;
2890 proto_sel = (app_param->valid_proto_app
2891 >> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
2892 & OCRDMA_APP_PARAM_PROTO_SEL_MASK;
2893 proto = app_param->valid_proto_app &
2894 OCRDMA_APP_PARAM_APP_PROTO_MASK;
2895
2896		if (valid && proto == OCRDMA_APP_PROTO_ROCE &&
2897		    proto_sel == OCRDMA_PROTO_SELECT_L2) {
2899 for (slindx = 0; slindx <
2900 OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
2901 app_prio = ocrdma_get_app_prio(
2902 (u8 *)app_param->app_prio,
2903 slindx);
2904 pfc_prio = ocrdma_get_pfc_prio(
2905 (u8 *)dcbxcfg->pfc_prio,
2906 slindx);
2907
2908 if (app_prio && pfc_prio) {
2909 *srvc_lvl = slindx;
2910 status = 0;
2911 goto out;
2912 }
2913 }
2914 if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
2915 pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
2916 dev_name(&dev->nic_info.pdev->dev),
2917 dev->id, proto);
2918 }
2919 }
2920 }
2921
2922out:
2923 return status;
2924}
2925
2926void ocrdma_init_service_level(struct ocrdma_dev *dev)
2927{
2928 int status = 0, indx;
2929 struct ocrdma_dcbx_cfg dcbxcfg;
2930 u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
2931 int ptype = OCRDMA_PARAMETER_TYPE_OPER;
2932
2933 for (indx = 0; indx < 2; indx++) {
2934 status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
2935 if (status) {
2936 pr_err("%s(): status=%d\n", __func__, status);
2937 ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
2938 continue;
2939 }
2940
2941 status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
2942 &dcbxcfg, &srvc_lvl);
2943 if (status) {
2944 ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
2945 continue;
2946 }
2947
2948 break;
2949 }
2950
2951 if (status)
2952 pr_info("%s ocrdma%d service level default\n",
2953 dev_name(&dev->nic_info.pdev->dev), dev->id);
2954 else
2955 pr_info("%s ocrdma%d service level %d\n",
2956 dev_name(&dev->nic_info.pdev->dev), dev->id,
2957 srvc_lvl);
2958
2959 dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
2960 dev->sl = srvc_lvl;
2961}
2962
Parav Panditfe2caef2012-03-21 04:09:06 +05302963int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
2964{
2965 int i;
2966 int status = -EINVAL;
2967 struct ocrdma_av *av;
2968 unsigned long flags;
2969
2970 av = dev->av_tbl.va;
2971 spin_lock_irqsave(&dev->av_tbl.lock, flags);
2972 for (i = 0; i < dev->av_tbl.num_ah; i++) {
2973 if (av->valid == 0) {
2974 av->valid = OCRDMA_AV_VALID;
2975 ah->av = av;
2976 ah->id = i;
2977 status = 0;
2978 break;
2979 }
2980 av++;
2981 }
2982 if (i == dev->av_tbl.num_ah)
2983 status = -EAGAIN;
2984 spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
2985 return status;
2986}
2987
2988int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
2989{
2990 unsigned long flags;
2991 spin_lock_irqsave(&dev->av_tbl.lock, flags);
2992 ah->av->valid = 0;
2993 spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
2994 return 0;
2995}
2996
static int ocrdma_create_eqs(struct ocrdma_dev *dev)
{
        int num_eq, i, status = 0;
        int irq;
        unsigned long flags = 0;

        num_eq = dev->nic_info.msix.num_vectors -
                        dev->nic_info.msix.start_vector;
        if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
                num_eq = 1;
                flags = IRQF_SHARED;
        } else {
                num_eq = min_t(u32, num_eq, num_online_cpus());
        }

        if (!num_eq)
                return -EINVAL;

        dev->eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
        if (!dev->eq_tbl)
                return -ENOMEM;

        for (i = 0; i < num_eq; i++) {
                status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
                                          OCRDMA_EQ_LEN);
                if (status) {
                        status = -EINVAL;
                        break;
                }
                sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
                        dev->id, i);
                irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
                status = request_irq(irq, ocrdma_irq_handler, flags,
                                     dev->eq_tbl[i].irq_name,
                                     &dev->eq_tbl[i]);
                if (status)
                        goto done;
                dev->eq_cnt += 1;
        }
        /* one eq is sufficient for data path to work */
        return 0;
done:
        ocrdma_destroy_eqs(dev);
        return status;
}

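/*
 * Program new interrupt delay multipliers for a batch of EQs with the
 * MODIFY_EQ_DELAY mailbox command.  The multiplier written for each EQ is
 * 65% of its previously selected EQD value.
 */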
static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
                                 int num)
{
        int i, status = -ENOMEM;
        struct ocrdma_modify_eqd_req *cmd;

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
        if (!cmd)
                return status;

        ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

        cmd->cmd.num_eq = num;
        for (i = 0; i < num; i++) {
                cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
                cmd->cmd.set_eqd[i].phase = 0;
                cmd->cmd.set_eqd[i].delay_multiplier =
                                (eq[i].aic_obj.prev_eqd * 65) / 100;
        }
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        kfree(cmd);
        return status;
}

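/*
 * Split a MODIFY_EQ_DELAY request into chunks of at most eight EQs per
 * mailbox command.
 */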
static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
                             int num)
{
        int num_eqs, i = 0;

        if (num > 8) {
                while (num) {
                        num_eqs = min(num, 8);
                        ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
                        i += num_eqs;
                        num -= num_eqs;
                }
        } else {
                ocrdma_mbx_modify_eqd(dev, eq, num);
        }
        return 0;
}

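/*
 * Adaptive interrupt coalescing worker, rescheduled every second.  When an
 * EQ's interrupt rate over the last interval crosses the high threshold its
 * delay is raised to EQ_AIC_MAX_EQD; when it drops below the low threshold
 * the delay is lowered back to EQ_AIC_MIN_EQD.  Any changes are pushed to
 * the firmware through ocrdma_modify_eqd().
 */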
void ocrdma_eqd_set_task(struct work_struct *work)
{
        struct ocrdma_dev *dev =
                container_of(work, struct ocrdma_dev, eqd_work.work);
        struct ocrdma_eq *eq = NULL;
        int i, num = 0, status = -EINVAL;
        u64 eq_intr;

        for (i = 0; i < dev->eq_cnt; i++) {
                eq = &dev->eq_tbl[i];
                if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
                        eq_intr = eq->aic_obj.eq_intr_cnt -
                                  eq->aic_obj.prev_eq_intr_cnt;
                        if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
                            (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
                                eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
                                num++;
                        } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
                                   (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
                                eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
                                num++;
                        }
                }
                eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
        }

        if (num)
                status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
        schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
}

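/*
 * One-time control-path initialization: create the EQs and the mailbox
 * queue, query firmware configuration, device attributes and firmware
 * version, create the AH table and fetch the PHY and controller
 * attributes.  Partial initialization is unwound on failure.
 */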
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
        int status;

        /* create the eqs */
        status = ocrdma_create_eqs(dev);
        if (status)
                goto qpeq_err;
        status = ocrdma_create_mq(dev);
        if (status)
                goto mq_err;
        status = ocrdma_mbx_query_fw_config(dev);
        if (status)
                goto conf_err;
        status = ocrdma_mbx_query_dev(dev);
        if (status)
                goto conf_err;
        status = ocrdma_mbx_query_fw_ver(dev);
        if (status)
                goto conf_err;
        status = ocrdma_mbx_create_ah_tbl(dev);
        if (status)
                goto conf_err;
        status = ocrdma_mbx_get_phy_info(dev);
        if (status)
                goto info_attrb_err;
        status = ocrdma_mbx_get_ctrl_attribs(dev);
        if (status)
                goto info_attrb_err;

        return 0;

info_attrb_err:
        ocrdma_mbx_delete_ah_tbl(dev);
conf_err:
        ocrdma_destroy_mq(dev);
mq_err:
        ocrdma_destroy_eqs(dev);
qpeq_err:
        pr_err("%s() status=%d\n", __func__, status);
        return status;
}

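/*
 * Tear down the control-path resources: free the PD pool, delete the AH
 * table and destroy the mailbox queue and event queues.
 */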
void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
        ocrdma_free_pd_pool(dev);
        ocrdma_mbx_delete_ah_tbl(dev);

        /* cleanup the control path */
        ocrdma_destroy_mq(dev);

        /* cleanup the eqs */
        ocrdma_destroy_eqs(dev);
}