1/*******************************************************************
2 * This file is part of the Emulex RoCE Device Driver for *
3 * RoCE (RDMA over Converged Ethernet) CNA Adapters. *
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *
20 * Contact Information:
21 * linux-drivers@emulex.com
22 *
23 * Emulex
24 * 3333 Susan Street
25 * Costa Mesa, CA 92626
26 *******************************************************************/
27
28#include <linux/sched.h>
29#include <linux/interrupt.h>
30#include <linux/log2.h>
31#include <linux/dma-mapping.h>
32
33#include <rdma/ib_verbs.h>
34#include <rdma/ib_user_verbs.h>
35#include <rdma/ib_addr.h>
36
37#include "ocrdma.h"
38#include "ocrdma_hw.h"
39#include "ocrdma_verbs.h"
40#include "ocrdma_ah.h"
41
42enum mbx_status {
43 OCRDMA_MBX_STATUS_FAILED = 1,
44 OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3,
45 OCRDMA_MBX_STATUS_OOR = 100,
46 OCRDMA_MBX_STATUS_INVALID_PD = 101,
47 OCRDMA_MBX_STATUS_PD_INUSE = 102,
48 OCRDMA_MBX_STATUS_INVALID_CQ = 103,
49 OCRDMA_MBX_STATUS_INVALID_QP = 104,
50 OCRDMA_MBX_STATUS_INVALID_LKEY = 105,
51 OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106,
52 OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107,
53 OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,
54 OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,
55 OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110,
56 OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,
57 OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112,
58 OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113,
59 OCRDMA_MBX_STATUS_MW_BOUND = 114,
60 OCRDMA_MBX_STATUS_INVALID_VA = 115,
61 OCRDMA_MBX_STATUS_INVALID_LENGTH = 116,
62 OCRDMA_MBX_STATUS_INVALID_FBO = 117,
63 OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118,
64 OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119,
65 OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,
66 OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,
67 OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129,
68 OCRDMA_MBX_STATUS_SRQ_ERROR = 133,
69 OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134,
70 OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135,
71 OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136,
72 OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,
73 OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,
74 OCRDMA_MBX_STATUS_QP_BOUND = 130,
75 OCRDMA_MBX_STATUS_INVALID_CHANGE = 139,
76 OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140,
77 OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,
78 OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142,
79 OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143,
80 OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144
81};
82
83enum additional_status {
84 OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
85};
86
87enum cqe_status {
88 OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1,
89 OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2,
90 OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3,
91 OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4,
92 OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5
93};
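/*
 * Mailbox errors are reported at three levels: the mailbox status and
 * additional status carried in the MQE response (decoded into errnos by
 * ocrdma_get_mbx_errno() below) and the status of the MCQE completion
 * itself (decoded by ocrdma_get_mbx_cqe_errno()).
 */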
94
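/*
 * The EQ, MQ-CQ and MQ rings are power-of-two sized, so the helpers
 * below advance the head/tail indices with a simple mask, e.g.
 * tail = (tail + 1) & (OCRDMA_EQ_LEN - 1).
 */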
95static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
96{
97 return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
98}
99
100static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
101{
102 eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
103}
104
105static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
106{
107 struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
108 (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
109
110 if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
111 return NULL;
112 return cqe;
113}
114
115static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
116{
117 dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
118}
119
120static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
121{
122 return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
123}
124
125static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
126{
127 dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
128}
129
130static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
131{
132 return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
133}
134
135enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
136{
137 switch (qps) {
138 case OCRDMA_QPS_RST:
139 return IB_QPS_RESET;
140 case OCRDMA_QPS_INIT:
141 return IB_QPS_INIT;
142 case OCRDMA_QPS_RTR:
143 return IB_QPS_RTR;
144 case OCRDMA_QPS_RTS:
145 return IB_QPS_RTS;
146 case OCRDMA_QPS_SQD:
147 case OCRDMA_QPS_SQ_DRAINING:
148 return IB_QPS_SQD;
149 case OCRDMA_QPS_SQE:
150 return IB_QPS_SQE;
151 case OCRDMA_QPS_ERR:
152 return IB_QPS_ERR;
 153 }
154 return IB_QPS_ERR;
155}
156
157static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
158{
159 switch (qps) {
160 case IB_QPS_RESET:
161 return OCRDMA_QPS_RST;
162 case IB_QPS_INIT:
163 return OCRDMA_QPS_INIT;
164 case IB_QPS_RTR:
165 return OCRDMA_QPS_RTR;
166 case IB_QPS_RTS:
167 return OCRDMA_QPS_RTS;
168 case IB_QPS_SQD:
169 return OCRDMA_QPS_SQD;
170 case IB_QPS_SQE:
171 return OCRDMA_QPS_SQE;
172 case IB_QPS_ERR:
173 return OCRDMA_QPS_ERR;
 174 }
175 return OCRDMA_QPS_ERR;
176}
177
178static int ocrdma_get_mbx_errno(u32 status)
179{
180 int err_num;
181 u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
182 OCRDMA_MBX_RSP_STATUS_SHIFT;
183 u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
184 OCRDMA_MBX_RSP_ASTATUS_SHIFT;
185
186 switch (mbox_status) {
187 case OCRDMA_MBX_STATUS_OOR:
188 case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
189 err_num = -EAGAIN;
190 break;
191
192 case OCRDMA_MBX_STATUS_INVALID_PD:
193 case OCRDMA_MBX_STATUS_INVALID_CQ:
194 case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
195 case OCRDMA_MBX_STATUS_INVALID_QP:
196 case OCRDMA_MBX_STATUS_INVALID_CHANGE:
197 case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
198 case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
199 case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
200 case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
201 case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
202 case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
203 case OCRDMA_MBX_STATUS_INVALID_LKEY:
204 case OCRDMA_MBX_STATUS_INVALID_VA:
205 case OCRDMA_MBX_STATUS_INVALID_LENGTH:
206 case OCRDMA_MBX_STATUS_INVALID_FBO:
207 case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
208 case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
209 case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
210 case OCRDMA_MBX_STATUS_SRQ_ERROR:
211 case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
212 err_num = -EINVAL;
213 break;
214
215 case OCRDMA_MBX_STATUS_PD_INUSE:
216 case OCRDMA_MBX_STATUS_QP_BOUND:
217 case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
218 case OCRDMA_MBX_STATUS_MW_BOUND:
219 err_num = -EBUSY;
220 break;
221
222 case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
223 case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
224 case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
225 case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
226 case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
227 case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
228 case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
229 case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
230 case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
231 err_num = -ENOBUFS;
232 break;
233
234 case OCRDMA_MBX_STATUS_FAILED:
 235 if (add_status ==
 236     OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES) {
 237 err_num = -EAGAIN;
 238 break;
 239 }
240 default:
241 err_num = -EFAULT;
242 }
243 return err_num;
244}
245
246static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
247{
248 int err_num = -EINVAL;
249
250 switch (cqe_status) {
251 case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
252 err_num = -EPERM;
253 break;
254 case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
255 err_num = -EINVAL;
256 break;
257 case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
258 case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
259 err_num = -EAGAIN;
260 break;
261 case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
262 default:
263 err_num = -EIO;
264 break;
265 }
266 return err_num;
267}
268
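/*
 * The CQ doorbell word built below carries the CQ id (low bits plus the
 * extension field), optional re-arm and solicited-event flags, and the
 * number of CQEs popped (consumed) by the driver.
 */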
269void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
270 bool solicited, u16 cqe_popped)
271{
272 u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;
273
274 val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
275 OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);
276
277 if (armed)
278 val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
279 if (solicited)
280 val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
281 val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
282 iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
283}
284
285static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
286{
287 u32 val = 0;
288
289 val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
290 val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
291 iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
292}
293
294static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
295 bool arm, bool clear_int, u16 num_eqe)
296{
297 u32 val = 0;
298
299 val |= eq_id & OCRDMA_EQ_ID_MASK;
300 val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);
301 if (arm)
302 val |= (1 << OCRDMA_REARM_SHIFT);
303 if (clear_int)
304 val |= (1 << OCRDMA_EQ_CLR_SHIFT);
305 val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
306 val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
307 iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
308}
309
310static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
311 u8 opcode, u8 subsys, u32 cmd_len)
312{
313 cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
314 cmd_hdr->timeout = 20; /* seconds */
315 cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
316}
317
318static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
319{
320 struct ocrdma_mqe *mqe;
321
322 mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
323 if (!mqe)
324 return NULL;
325 mqe->hdr.spcl_sge_cnt_emb |=
326 (OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
327 OCRDMA_MQE_HDR_EMB_MASK;
328 mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);
329
330 ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
331 mqe->hdr.pyld_len);
332 return mqe;
333}
334
335static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
336{
337 dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
338}
339
340static int ocrdma_alloc_q(struct ocrdma_dev *dev,
341 struct ocrdma_queue_info *q, u16 len, u16 entry_size)
342{
343 memset(q, 0, sizeof(*q));
344 q->len = len;
345 q->entry_size = entry_size;
346 q->size = len * entry_size;
347 q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
348 &q->dma, GFP_KERNEL);
349 if (!q->va)
350 return -ENOMEM;
351 memset(q->va, 0, q->size);
352 return 0;
353}
354
355static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
356 dma_addr_t host_pa, int hw_page_size)
357{
358 int i;
359
360 for (i = 0; i < cnt; i++) {
361 q_pa[i].lo = (u32) (host_pa & 0xffffffff);
362 q_pa[i].hi = (u32) upper_32_bits(host_pa);
363 host_pa += hw_page_size;
364 }
365}
366
367static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,
368 struct ocrdma_eq *eq)
369{
370 /* assign vector and update vector id for next EQ */
371 eq->vector = dev->nic_info.msix.start_vector;
372 dev->nic_info.msix.start_vector += 1;
373}
374
375static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)
376{
 377 /* this assumes that EQs are freed in exactly the reverse
 378 * order of their allocation.
 379 */
380 dev->nic_info.msix.start_vector -= 1;
381}
382
383static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
 384 int queue_type)
385{
386 u8 opcode = 0;
387 int status;
388 struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;
389
390 switch (queue_type) {
391 case QTYPE_MCCQ:
392 opcode = OCRDMA_CMD_DELETE_MQ;
393 break;
394 case QTYPE_CQ:
395 opcode = OCRDMA_CMD_DELETE_CQ;
396 break;
397 case QTYPE_EQ:
398 opcode = OCRDMA_CMD_DELETE_EQ;
399 break;
400 default:
401 BUG();
402 }
403 memset(cmd, 0, sizeof(*cmd));
404 ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
405 cmd->id = q->id;
406
407 status = be_roce_mcc_cmd(dev->nic_info.netdev,
408 cmd, sizeof(*cmd), NULL, NULL);
409 if (!status)
410 q->created = false;
411 return status;
412}
413
414static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
415{
416 int status;
417 struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
418 struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;
419
420 memset(cmd, 0, sizeof(*cmd));
421 ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
422 sizeof(*cmd));
423 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
424 cmd->req.rsvd_version = 0;
425 else
426 cmd->req.rsvd_version = 2;
427
428 cmd->num_pages = 4;
429 cmd->valid = OCRDMA_CREATE_EQ_VALID;
430 cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;
431
432 ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
433 PAGE_SIZE_4K);
434 status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
435 NULL);
436 if (!status) {
437 eq->q.id = rsp->vector_eqid & 0xffff;
438 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
439 ocrdma_assign_eq_vect_gen2(dev, eq);
440 } else {
441 eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
442 dev->nic_info.msix.start_vector += 1;
443 }
444 eq->q.created = true;
445 }
446 return status;
447}
448
449static int ocrdma_create_eq(struct ocrdma_dev *dev,
450 struct ocrdma_eq *eq, u16 q_len)
451{
452 int status;
453
454 status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
455 sizeof(struct ocrdma_eqe));
456 if (status)
457 return status;
458
459 status = ocrdma_mbx_create_eq(dev, eq);
460 if (status)
461 goto mbx_err;
462 eq->dev = dev;
463 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
464
465 return 0;
466mbx_err:
467 ocrdma_free_q(dev, &eq->q);
468 return status;
469}
470
471static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
472{
473 int irq;
474
475 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
476 irq = dev->nic_info.pdev->irq;
477 else
478 irq = dev->nic_info.msix.vector_list[eq->vector];
479 return irq;
480}
481
482static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
483{
484 if (eq->q.created) {
485 ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
486 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
487 ocrdma_free_eq_vect_gen2(dev);
488 ocrdma_free_q(dev, &eq->q);
489 }
490}
491
492static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
493{
494 int irq;
495
 496 /* disarm the EQ so that no interrupts are generated while
 497 * it is being freed and the EQ delete command is in progress.
 498 */
499 ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);
500
501 irq = ocrdma_get_irq(dev, eq);
502 free_irq(irq, eq);
503 _ocrdma_destroy_eq(dev, eq);
504}
505
506static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)
507{
508 int i;
509
510 /* deallocate the data path eqs */
511 for (i = 0; i < dev->eq_cnt; i++)
512 ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
513}
514
515static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
 516 struct ocrdma_queue_info *cq,
 517 struct ocrdma_queue_info *eq)
518{
519 struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
520 struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
521 int status;
522
523 memset(cmd, 0, sizeof(*cmd));
524 ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
525 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
526
527 cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
 528 cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
 529 OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
 530 cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);
531
532 cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
533 cmd->eqn = eq->id;
534 cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe);
535
536 ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
537 cq->dma, PAGE_SIZE_4K);
 538 status = be_roce_mcc_cmd(dev->nic_info.netdev,
 539 cmd, sizeof(*cmd), NULL, NULL);
 540 if (!status) {
541 cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
542 cq->created = true;
543 }
544 return status;
545}
546
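/*
 * The MQ ring size is passed to firmware as fls(len), i.e. log2(len) + 1
 * for a power-of-two length; for example, 128 entries encode as 8. An
 * encoded value of 16 is sent as 0.
 */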
547static u32 ocrdma_encoded_q_len(int q_len)
548{
549 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
550
551 if (len_encoded == 16)
552 len_encoded = 0;
553 return len_encoded;
554}
555
556static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
557 struct ocrdma_queue_info *mq,
558 struct ocrdma_queue_info *cq)
559{
560 int num_pages, status;
561 struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
562 struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
563 struct ocrdma_pa *pa;
564
565 memset(cmd, 0, sizeof(*cmd));
566 num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
567
568 ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
569 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
570 cmd->req.rsvd_version = 1;
571 cmd->cqid_pages = num_pages;
572 cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
573 cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
574 cmd->async_event_bitmap = Bit(20);
575 cmd->async_cqid_ringsize = cq->id;
576 cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
577 OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
578 cmd->valid = OCRDMA_CREATE_MQ_VALID;
579 pa = &cmd->pa[0];
580
581 ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
582 status = be_roce_mcc_cmd(dev->nic_info.netdev,
583 cmd, sizeof(*cmd), NULL, NULL);
584 if (!status) {
585 mq->id = rsp->id;
586 mq->created = true;
587 }
588 return status;
589}
590
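/*
 * Set up the mailbox command path: a CQ for mailbox completions bound to
 * the dev->meq EQ, the mqe_ctx used to synchronize and track the command
 * in flight, and the mailbox send queue itself. ocrdma_destroy_mq()
 * releases them in the reverse order.
 */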
591static int ocrdma_create_mq(struct ocrdma_dev *dev)
592{
593 int status;
594
595 /* Alloc completion queue for Mailbox queue */
596 status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
597 sizeof(struct ocrdma_mcqe));
598 if (status)
599 goto alloc_err;
600
601 status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);
602 if (status)
603 goto mbx_cq_free;
604
605 memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
606 init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
607 mutex_init(&dev->mqe_ctx.lock);
608
609 /* Alloc Mailbox queue */
610 status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
611 sizeof(struct ocrdma_mqe));
612 if (status)
613 goto mbx_cq_destroy;
614 status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
615 if (status)
616 goto mbx_q_free;
617 ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
618 return 0;
619
620mbx_q_free:
621 ocrdma_free_q(dev, &dev->mq.sq);
622mbx_cq_destroy:
623 ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
624mbx_cq_free:
625 ocrdma_free_q(dev, &dev->mq.cq);
626alloc_err:
627 return status;
628}
629
630static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
631{
632 struct ocrdma_queue_info *mbxq, *cq;
633
634 /* mqe_ctx lock synchronizes with any other pending cmds. */
635 mutex_lock(&dev->mqe_ctx.lock);
636 mbxq = &dev->mq.sq;
637 if (mbxq->created) {
638 ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
639 ocrdma_free_q(dev, mbxq);
640 }
641 mutex_unlock(&dev->mqe_ctx.lock);
642
643 cq = &dev->mq.cq;
644 if (cq->created) {
645 ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
646 ocrdma_free_q(dev, cq);
647 }
648}
649
650static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
651 struct ocrdma_qp *qp)
652{
653 enum ib_qp_state new_ib_qps = IB_QPS_ERR;
654 enum ib_qp_state old_ib_qps;
655
656 if (qp == NULL)
657 BUG();
658 ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
659}
660
661static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
662 struct ocrdma_ae_mcqe *cqe)
663{
664 struct ocrdma_qp *qp = NULL;
665 struct ocrdma_cq *cq = NULL;
666 struct ib_event ib_evt;
667 int cq_event = 0;
668 int qp_event = 1;
669 int srq_event = 0;
670 int dev_event = 0;
671 int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
672 OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
673
674 if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
675 qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
676 if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
677 cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];
678
679 ib_evt.device = &dev->ibdev;
 680
681 switch (type) {
682 case OCRDMA_CQ_ERROR:
683 ib_evt.element.cq = &cq->ibcq;
684 ib_evt.event = IB_EVENT_CQ_ERR;
685 cq_event = 1;
686 qp_event = 0;
687 break;
688 case OCRDMA_CQ_OVERRUN_ERROR:
689 ib_evt.element.cq = &cq->ibcq;
690 ib_evt.event = IB_EVENT_CQ_ERR;
691 break;
692 case OCRDMA_CQ_QPCAT_ERROR:
693 ib_evt.element.qp = &qp->ibqp;
694 ib_evt.event = IB_EVENT_QP_FATAL;
695 ocrdma_process_qpcat_error(dev, qp);
696 break;
697 case OCRDMA_QP_ACCESS_ERROR:
698 ib_evt.element.qp = &qp->ibqp;
699 ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
700 break;
701 case OCRDMA_QP_COMM_EST_EVENT:
702 ib_evt.element.qp = &qp->ibqp;
703 ib_evt.event = IB_EVENT_COMM_EST;
704 break;
705 case OCRDMA_SQ_DRAINED_EVENT:
706 ib_evt.element.qp = &qp->ibqp;
707 ib_evt.event = IB_EVENT_SQ_DRAINED;
708 break;
709 case OCRDMA_DEVICE_FATAL_EVENT:
710 ib_evt.element.port_num = 1;
711 ib_evt.event = IB_EVENT_DEVICE_FATAL;
712 qp_event = 0;
713 dev_event = 1;
714 break;
715 case OCRDMA_SRQCAT_ERROR:
716 ib_evt.element.srq = &qp->srq->ibsrq;
717 ib_evt.event = IB_EVENT_SRQ_ERR;
718 srq_event = 1;
719 qp_event = 0;
720 break;
721 case OCRDMA_SRQ_LIMIT_EVENT:
722 ib_evt.element.srq = &qp->srq->ibsrq;
723 ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
724 srq_event = 1;
725 qp_event = 0;
726 break;
727 case OCRDMA_QP_LAST_WQE_EVENT:
728 ib_evt.element.qp = &qp->ibqp;
729 ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
730 break;
731 default:
732 cq_event = 0;
733 qp_event = 0;
734 srq_event = 0;
735 dev_event = 0;
736 pr_err("%s() unknown type=0x%x\n", __func__, type);
737 break;
738 }
739
740 if (qp_event) {
741 if (qp->ibqp.event_handler)
742 qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
743 } else if (cq_event) {
744 if (cq->ibcq.event_handler)
745 cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
746 } else if (srq_event) {
747 if (qp->srq->ibsrq.event_handler)
748 qp->srq->ibsrq.event_handler(&ib_evt,
749 qp->srq->ibsrq.
750 srq_context);
751 } else if (dev_event) {
752 ib_dispatch_event(&ib_evt);
753 }
754
755}
756
757static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
758{
759 /* async CQE processing */
760 struct ocrdma_ae_mcqe *cqe = ae_cqe;
761 u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
762 OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
763
764 if (evt_code == OCRDMA_ASYNC_EVE_CODE)
765 ocrdma_dispatch_ibevent(dev, cqe);
766 else
767 pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
 768 dev->id, evt_code);
769}
770
771static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
772{
773 if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
774 dev->mqe_ctx.cqe_status = (cqe->status &
775 OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
776 dev->mqe_ctx.ext_status =
777 (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
778 >> OCRDMA_MCQE_ESTATUS_SHIFT;
779 dev->mqe_ctx.cmd_done = true;
780 wake_up(&dev->mqe_ctx.cmd_wait);
781 } else
782 pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
 783 __func__, cqe->tag_lo, dev->mqe_ctx.tag);
784}
785
786static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
787{
788 u16 cqe_popped = 0;
789 struct ocrdma_mcqe *cqe;
790
791 while (1) {
792 cqe = ocrdma_get_mcqe(dev);
793 if (cqe == NULL)
794 break;
795 ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
796 cqe_popped += 1;
797 if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
798 ocrdma_process_acqe(dev, cqe);
799 else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
800 ocrdma_process_mcqe(dev, cqe);
801 else
802 pr_err("%s() cqe->compl is not set.\n", __func__);
803 memset(cqe, 0, sizeof(struct ocrdma_mcqe));
804 ocrdma_mcq_inc_tail(dev);
805 }
806 ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
807 return 0;
808}
809
810static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
811 struct ocrdma_cq *cq)
812{
813 unsigned long flags;
814 struct ocrdma_qp *qp;
815 bool buddy_cq_found = false;
 816 /* Go through the list of QPs in error state that are using this CQ
 817 * and invoke their callback handlers to trigger CQE processing for
 818 * error/flushed CQEs. It is rare to find more than a few entries in
 819 * this list, as most consumers stop after getting an error CQE.
 820 * The list is traversed only until a matching buddy cq is found for a QP.
 821 */
822 spin_lock_irqsave(&dev->flush_q_lock, flags);
823 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
824 if (qp->srq)
825 continue;
 826 /* if wq and rq share the same cq, then the comp_handler
 827 * is already invoked.
 828 */
829 if (qp->sq_cq == qp->rq_cq)
830 continue;
831 /* if completion came on sq, rq's cq is buddy cq.
832 * if completion came on rq, sq's cq is buddy cq.
833 */
834 if (qp->sq_cq == cq)
835 cq = qp->rq_cq;
836 else
837 cq = qp->sq_cq;
838 buddy_cq_found = true;
839 break;
840 }
841 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
842 if (buddy_cq_found == false)
843 return;
844 if (cq->ibcq.comp_handler) {
845 spin_lock_irqsave(&cq->comp_handler_lock, flags);
846 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
847 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
848 }
849}
850
851static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
852{
853 unsigned long flags;
854 struct ocrdma_cq *cq;
855
856 if (cq_idx >= OCRDMA_MAX_CQ)
857 BUG();
858
859 cq = dev->cq_tbl[cq_idx];
860 if (cq == NULL) {
861 pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
862 return;
863 }
864 spin_lock_irqsave(&cq->cq_lock, flags);
865 cq->armed = false;
866 cq->solicited = false;
867 spin_unlock_irqrestore(&cq->cq_lock, flags);
868
869 ocrdma_ring_cq_db(dev, cq->id, false, false, 0);
870
871 if (cq->ibcq.comp_handler) {
872 spin_lock_irqsave(&cq->comp_handler_lock, flags);
873 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
874 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
875 }
876 ocrdma_qp_buddy_cq_handler(dev, cq);
877}
878
879static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
880{
881 /* process the MQ-CQE. */
882 if (cq_id == dev->mq.cq.id)
883 ocrdma_mq_cq_handler(dev, cq_id);
884 else
885 ocrdma_qp_cq_handler(dev, cq_id);
886}
887
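/*
 * EQ interrupt handler: pop valid EQEs, dispatch each event that refers
 * to a CQ to either the MQ-CQ or a QP CQ handler, then ring the EQ
 * doorbell with the popped count to re-arm it (and once more with zero
 * popped when running in INTx mode).
 */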
888static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
889{
890 struct ocrdma_eq *eq = handle;
891 struct ocrdma_dev *dev = eq->dev;
892 struct ocrdma_eqe eqe;
893 struct ocrdma_eqe *ptr;
894 u16 eqe_popped = 0;
895 u16 cq_id;
896 while (1) {
897 ptr = ocrdma_get_eqe(eq);
898 eqe = *ptr;
899 ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
900 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
901 break;
902 eqe_popped += 1;
903 ptr->id_valid = 0;
 904 /* check whether it is a CQE or not. */
905 if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
906 cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
907 ocrdma_cq_handler(dev, cq_id);
908 }
909 ocrdma_eq_inc_tail(eq);
910 }
911 ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
912 /* Ring EQ doorbell with num_popped to 0 to enable interrupts again. */
913 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
914 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
915 return IRQ_HANDLED;
916}
917
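/*
 * Mailbox commands are synchronous: ocrdma_mbx_cmd() below serializes
 * callers with mqe_ctx.lock, posts a single MQE tagged with the current
 * sq head, rings the MQ doorbell and sleeps (up to 30 seconds) until
 * ocrdma_process_mcqe() sees a completion with the matching tag; the
 * response is then read back from the same MQE slot.
 */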
918static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
919{
920 struct ocrdma_mqe *mqe;
921
922 dev->mqe_ctx.tag = dev->mq.sq.head;
923 dev->mqe_ctx.cmd_done = false;
924 mqe = ocrdma_get_mqe(dev);
925 cmd->hdr.tag_lo = dev->mq.sq.head;
926 ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
927 /* make sure descriptor is written before ringing doorbell */
928 wmb();
929 ocrdma_mq_inc_head(dev);
930 ocrdma_ring_mq_db(dev);
931}
932
933static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
934{
935 long status;
936 /* 30 sec timeout */
937 status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
938 (dev->mqe_ctx.cmd_done != false),
939 msecs_to_jiffies(30000));
940 if (status)
941 return 0;
942 else
943 return -1;
944}
945
946/* issue a mailbox command on the MQ */
947static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
948{
949 int status = 0;
950 u16 cqe_status, ext_status;
951 struct ocrdma_mqe *rsp;
952
953 mutex_lock(&dev->mqe_ctx.lock);
954 ocrdma_post_mqe(dev, mqe);
955 status = ocrdma_wait_mqe_cmpl(dev);
956 if (status)
957 goto mbx_err;
958 cqe_status = dev->mqe_ctx.cqe_status;
959 ext_status = dev->mqe_ctx.ext_status;
960 rsp = ocrdma_get_mqe_rsp(dev);
961 ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
962 if (cqe_status || ext_status) {
963 pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
 964 __func__,
965 (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
966 OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
967 status = ocrdma_get_mbx_cqe_errno(cqe_status);
968 goto mbx_err;
969 }
970 if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
971 status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
972mbx_err:
973 mutex_unlock(&dev->mqe_ctx.lock);
974 return status;
975}
976
977static void ocrdma_get_attr(struct ocrdma_dev *dev,
978 struct ocrdma_dev_attr *attr,
979 struct ocrdma_mbx_query_config *rsp)
980{
981 attr->max_pd =
982 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
983 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
984 attr->max_qp =
985 (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
986 OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
987 attr->max_send_sge = ((rsp->max_write_send_sge &
988 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
989 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
990 attr->max_recv_sge = (rsp->max_write_send_sge &
991 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
992 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
993 attr->max_srq_sge = (rsp->max_srq_rqe_sge &
 994 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
 995 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
996 attr->max_rdma_sge = (rsp->max_write_send_sge &
 997 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
 998 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
999 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
Parav Panditfe2caef2012-03-21 04:09:06 +0530999 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
1000 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
1001 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
1002 attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
1003 OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
1004 OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
1005 attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
1006 OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
1007 OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
1008 attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
1009 OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
1010 OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
1011 attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
1012 OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
1013 OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
1014 attr->max_mr = rsp->max_mr;
1015 attr->max_mr_size = ~0ull;
1016 attr->max_fmr = 0;
1017 attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
1018 attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
1019 attr->max_cqe = rsp->max_cq_cqes_per_cq &
1020 OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
1021 attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
1022 OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
1023 OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
1024 OCRDMA_WQE_STRIDE;
1025 attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
1026 OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
1027 OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
1028 OCRDMA_WQE_STRIDE;
1029 attr->max_inline_data =
1030 attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
1031 sizeof(struct ocrdma_sge));
1032 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1033 attr->ird = 1;
1034 attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
1035 attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
1036 }
1037 dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
1038 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
1039 dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
1040 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
1041}
1042
1043static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
1044 struct ocrdma_fw_conf_rsp *conf)
1045{
1046 u32 fn_mode;
1047
1048 fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
1049 if (fn_mode != OCRDMA_FN_MODE_RDMA)
1050 return -EINVAL;
1051 dev->base_eqid = conf->base_eqid;
1052 dev->max_eq = conf->max_eq;
1053 dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
1054 return 0;
1055}
1056
1057/* can be issued only during init time. */
1058static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
1059{
1060 int status = -ENOMEM;
1061 struct ocrdma_mqe *cmd;
1062 struct ocrdma_fw_ver_rsp *rsp;
1063
1064 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
1065 if (!cmd)
1066 return -ENOMEM;
1067 ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1068 OCRDMA_CMD_GET_FW_VER,
1069 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1070
1071 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1072 if (status)
1073 goto mbx_err;
1074 rsp = (struct ocrdma_fw_ver_rsp *)cmd;
1075 memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
1076 memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
1077 sizeof(rsp->running_ver));
1078 ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
1079mbx_err:
1080 kfree(cmd);
1081 return status;
1082}
1083
1084/* can be issued only during init time. */
1085static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
1086{
1087 int status = -ENOMEM;
1088 struct ocrdma_mqe *cmd;
1089 struct ocrdma_fw_conf_rsp *rsp;
1090
1091 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
1092 if (!cmd)
1093 return -ENOMEM;
1094 ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1095 OCRDMA_CMD_GET_FW_CONFIG,
1096 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1097 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1098 if (status)
1099 goto mbx_err;
1100 rsp = (struct ocrdma_fw_conf_rsp *)cmd;
1101 status = ocrdma_check_fw_config(dev, rsp);
1102mbx_err:
1103 kfree(cmd);
1104 return status;
1105}
1106
1107static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
1108{
1109 int status = -ENOMEM;
1110 struct ocrdma_mbx_query_config *rsp;
1111 struct ocrdma_mqe *cmd;
1112
1113 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
1114 if (!cmd)
1115 return status;
1116 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1117 if (status)
1118 goto mbx_err;
1119 rsp = (struct ocrdma_mbx_query_config *)cmd;
1120 ocrdma_get_attr(dev, &dev->attr, rsp);
1121mbx_err:
1122 kfree(cmd);
1123 return status;
1124}
1125
1126int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1127{
1128 int status = -ENOMEM;
1129 struct ocrdma_alloc_pd *cmd;
1130 struct ocrdma_alloc_pd_rsp *rsp;
1131
1132 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
1133 if (!cmd)
1134 return status;
1135 if (pd->dpp_enabled)
1136 cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
1137 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1138 if (status)
1139 goto mbx_err;
1140 rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
1141 pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
1142 if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
1143 pd->dpp_enabled = true;
1144 pd->dpp_page = rsp->dpp_page_pdid >>
1145 OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
1146 } else {
1147 pd->dpp_enabled = false;
1148 pd->num_dpp_qp = 0;
1149 }
1150mbx_err:
1151 kfree(cmd);
1152 return status;
1153}
1154
1155int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1156{
1157 int status = -ENOMEM;
1158 struct ocrdma_dealloc_pd *cmd;
1159
1160 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
1161 if (!cmd)
1162 return status;
1163 cmd->id = pd->id;
1164 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1165 kfree(cmd);
1166 return status;
1167}
1168
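/*
 * Round the requested entry count up to a power of two and pick the
 * smallest queue size bucket that holds it; the page size is that bucket
 * divided by OCRDMA_MAX_Q_PAGES, and the resulting page count, page size
 * and (possibly enlarged) entry count are returned to the caller.
 */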
1169static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
1170 int *num_pages, int *page_size)
1171{
1172 int i;
1173 int mem_size;
1174
1175 *num_entries = roundup_pow_of_two(*num_entries);
1176 mem_size = *num_entries * entry_size;
 1177 /* find the lowest possible multiplier */
1178 for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1179 if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
1180 break;
1181 }
1182 if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
1183 return -EINVAL;
1184 mem_size = roundup(mem_size,
1185 ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
1186 *num_pages =
1187 mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1188 *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1189 *num_entries = mem_size / entry_size;
1190 return 0;
1191}
1192
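/*
 * The address handle (AH) table is described to firmware by a single PBL
 * page whose PBEs point at the pages backing OCRDMA_MAX_AH ocrdma_av
 * entries; the ahid returned in the response is kept in dev->av_tbl.
 */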
1193static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
1194{
1195 int i ;
1196 int status = 0;
1197 int max_ah;
1198 struct ocrdma_create_ah_tbl *cmd;
1199 struct ocrdma_create_ah_tbl_rsp *rsp;
1200 struct pci_dev *pdev = dev->nic_info.pdev;
1201 dma_addr_t pa;
1202 struct ocrdma_pbe *pbes;
1203
1204 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
1205 if (!cmd)
1206 return status;
1207
1208 max_ah = OCRDMA_MAX_AH;
1209 dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;
1210
1211 /* number of PBEs in PBL */
1212 cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
1213 OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
1214 OCRDMA_CREATE_AH_NUM_PAGES_MASK;
1215
1216 /* page size */
1217 for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1218 if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
1219 break;
1220 }
1221 cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
1222 OCRDMA_CREATE_AH_PAGE_SIZE_MASK;
1223
1224 /* ah_entry size */
1225 cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
1226 OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
1227 OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;
1228
1229 dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
1230 &dev->av_tbl.pbl.pa,
1231 GFP_KERNEL);
1232 if (dev->av_tbl.pbl.va == NULL)
1233 goto mem_err;
1234
1235 dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
1236 &pa, GFP_KERNEL);
1237 if (dev->av_tbl.va == NULL)
1238 goto mem_err_ah;
1239 dev->av_tbl.pa = pa;
1240 dev->av_tbl.num_ah = max_ah;
1241 memset(dev->av_tbl.va, 0, dev->av_tbl.size);
1242
1243 pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
1244 for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
1245 pbes[i].pa_lo = (u32) (pa & 0xffffffff);
1246 pbes[i].pa_hi = (u32) upper_32_bits(pa);
1247 pa += PAGE_SIZE;
1248 }
1249 cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
1250 cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
1251 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1252 if (status)
1253 goto mbx_err;
1254 rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
1255 dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
1256 kfree(cmd);
1257 return 0;
1258
1259mbx_err:
1260 dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1261 dev->av_tbl.pa);
1262 dev->av_tbl.va = NULL;
1263mem_err_ah:
1264 dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1265 dev->av_tbl.pbl.pa);
1266 dev->av_tbl.pbl.va = NULL;
1267 dev->av_tbl.size = 0;
1268mem_err:
1269 kfree(cmd);
1270 return status;
1271}
1272
1273static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
1274{
1275 struct ocrdma_delete_ah_tbl *cmd;
1276 struct pci_dev *pdev = dev->nic_info.pdev;
1277
1278 if (dev->av_tbl.va == NULL)
1279 return;
1280
1281 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
1282 if (!cmd)
1283 return;
1284 cmd->ahid = dev->av_tbl.ahid;
1285
1286 ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1287 dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1288 dev->av_tbl.pa);
1289 dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1290 dev->av_tbl.pbl.pa);
1291 kfree(cmd);
1292}
1293
1294/* Multiple CQs share an EQ. This routine returns the least used
 1295 * EQ to associate with a CQ. This distributes the interrupt
 1296 * processing and CPU load across EQs, their vectors and thus the CPUs.
 1297 */
1298static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
1299{
1300 int i, selected_eq = 0, cq_cnt = 0;
1301 u16 eq_id;
1302
1303 mutex_lock(&dev->dev_lock);
1304 cq_cnt = dev->qp_eq_tbl[0].cq_cnt;
1305 eq_id = dev->qp_eq_tbl[0].q.id;
 1306 /* find the EQ which has the least number of
 1307 * CQs associated with it.
 1308 */
1309 for (i = 0; i < dev->eq_cnt; i++) {
1310 if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
1311 cq_cnt = dev->qp_eq_tbl[i].cq_cnt;
1312 eq_id = dev->qp_eq_tbl[i].q.id;
1313 selected_eq = i;
1314 }
1315 }
1316 dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
1317 mutex_unlock(&dev->dev_lock);
1318 return eq_id;
1319}
1320
1321static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
1322{
1323 int i;
1324
1325 mutex_lock(&dev->dev_lock);
1326 for (i = 0; i < dev->eq_cnt; i++) {
1327 if (dev->qp_eq_tbl[i].q.id != eq_id)
1328 continue;
1329 dev->qp_eq_tbl[i].cq_cnt -= 1;
1330 break;
1331 }
1332 mutex_unlock(&dev->dev_lock);
1333}
1334
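/*
 * The CQE count is encoded for firmware: 256/512/1024 entries map to
 * codes 0/1/2 and anything larger uses code 3 (see the switch below).
 * DPP CQs are created with a single CQE and, on GEN2 devices, also set
 * the DPP CQ type bit in pgsz_pgcnt.
 */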
1335int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1336 int entries, int dpp_cq)
1337{
1338 int status = -ENOMEM; int max_hw_cqe;
1339 struct pci_dev *pdev = dev->nic_info.pdev;
1340 struct ocrdma_create_cq *cmd;
1341 struct ocrdma_create_cq_rsp *rsp;
1342 u32 hw_pages, cqe_size, page_size, cqe_count;
1343
1344 if (entries > dev->attr.max_cqe) {
1345 pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
 1346 __func__, dev->id, dev->attr.max_cqe, entries);
1347 return -EINVAL;
1348 }
1349 if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
1350 return -EINVAL;
1351
1352 if (dpp_cq) {
1353 cq->max_hw_cqe = 1;
1354 max_hw_cqe = 1;
1355 cqe_size = OCRDMA_DPP_CQE_SIZE;
1356 hw_pages = 1;
1357 } else {
1358 cq->max_hw_cqe = dev->attr.max_cqe;
1359 max_hw_cqe = dev->attr.max_cqe;
1360 cqe_size = sizeof(struct ocrdma_cqe);
1361 hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
1362 }
1363
1364 cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);
1365
1366 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
1367 if (!cmd)
1368 return -ENOMEM;
1369 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
1370 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1371 cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
1372 if (!cq->va) {
1373 status = -ENOMEM;
1374 goto mem_err;
1375 }
1376 memset(cq->va, 0, cq->len);
1377 page_size = cq->len / hw_pages;
1378 cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
1379 OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
1380 cmd->cmd.pgsz_pgcnt |= hw_pages;
1381 cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
1382
1383 cq->eqn = ocrdma_bind_eq(dev);
1384 cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
1385 cqe_count = cq->len / cqe_size;
1386 if (cqe_count > 1024) {
1387 /* Set cnt to 3 to indicate more than 1024 cq entries */
 1388 cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
1389 } else {
1390 u8 count = 0;
1391 switch (cqe_count) {
1392 case 256:
1393 count = 0;
1394 break;
1395 case 512:
1396 count = 1;
1397 break;
1398 case 1024:
1399 count = 2;
1400 break;
1401 default:
1402 goto mbx_err;
1403 }
1404 cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
1405 }
1406 /* shared eq between all the consumer cqs. */
1407 cmd->cmd.eqn = cq->eqn;
1408 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1409 if (dpp_cq)
1410 cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
1411 OCRDMA_CREATE_CQ_TYPE_SHIFT;
1412 cq->phase_change = false;
1413 cmd->cmd.cqe_count = (cq->len / cqe_size);
1414 } else {
1415 cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
1416 cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
1417 cq->phase_change = true;
1418 }
1419
1420 ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
1421 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1422 if (status)
1423 goto mbx_err;
1424
1425 rsp = (struct ocrdma_create_cq_rsp *)cmd;
1426 cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
1427 kfree(cmd);
1428 return 0;
1429mbx_err:
1430 ocrdma_unbind_eq(dev, cq->eqn);
1431 dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
1432mem_err:
1433 kfree(cmd);
1434 return status;
1435}
1436
1437int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
1438{
1439 int status = -ENOMEM;
1440 struct ocrdma_destroy_cq *cmd;
1441
1442 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
1443 if (!cmd)
1444 return status;
1445 ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
1446 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1447
1448 cmd->bypass_flush_qid |=
1449 (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
1450 OCRDMA_DESTROY_CQ_QID_MASK;
1451
1452 ocrdma_unbind_eq(dev, cq->eqn);
1453 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1454 if (status)
1455 goto mbx_err;
1456 dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
1457mbx_err:
1458 kfree(cmd);
1459 return status;
1460}
1461
1462int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1463 u32 pdid, int addr_check)
1464{
1465 int status = -ENOMEM;
1466 struct ocrdma_alloc_lkey *cmd;
1467 struct ocrdma_alloc_lkey_rsp *rsp;
1468
1469 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
1470 if (!cmd)
1471 return status;
1472 cmd->pdid = pdid;
1473 cmd->pbl_sz_flags |= addr_check;
1474 cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
1475 cmd->pbl_sz_flags |=
1476 (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
1477 cmd->pbl_sz_flags |=
1478 (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
1479 cmd->pbl_sz_flags |=
1480 (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
1481 cmd->pbl_sz_flags |=
1482 (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
1483 cmd->pbl_sz_flags |=
1484 (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);
1485
1486 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1487 if (status)
1488 goto mbx_err;
1489 rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
1490 hwmr->lkey = rsp->lrkey;
1491mbx_err:
1492 kfree(cmd);
1493 return status;
1494}
1495
1496int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
1497{
1498 int status = -ENOMEM;
1499 struct ocrdma_dealloc_lkey *cmd;
1500
1501 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
1502 if (!cmd)
1503 return -ENOMEM;
1504 cmd->lkey = lkey;
1505 cmd->rsvd_frmr = fr_mr ? 1 : 0;
1506 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1507 if (status)
1508 goto mbx_err;
1509mbx_err:
1510 kfree(cmd);
1511 return status;
1512}
1513
1514static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1515 u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
1516{
1517 int status = -ENOMEM;
1518 int i;
1519 struct ocrdma_reg_nsmr *cmd;
1520 struct ocrdma_reg_nsmr_rsp *rsp;
1521
1522 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
1523 if (!cmd)
1524 return -ENOMEM;
1525 cmd->num_pbl_pdid =
1526 pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
1527
1528 cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
1529 OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
1530 cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
1531 OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
1532 cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
1533 OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
1534 cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
1535 OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
1536 cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
1537 OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
1538 cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);
1539
1540 cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
1541 cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
1542 OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
1543 cmd->totlen_low = hwmr->len;
1544 cmd->totlen_high = upper_32_bits(hwmr->len);
1545 cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
1546 cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
1547 cmd->va_loaddr = (u32) hwmr->va;
1548 cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
1549
1550 for (i = 0; i < pbl_cnt; i++) {
1551 cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
1552 cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
1553 }
1554 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1555 if (status)
1556 goto mbx_err;
1557 rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
1558 hwmr->lkey = rsp->lrkey;
1559mbx_err:
1560 kfree(cmd);
1561 return status;
1562}
1563
1564static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
1565 struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
1566 u32 pbl_offset, u32 last)
1567{
1568 int status = -ENOMEM;
1569 int i;
1570 struct ocrdma_reg_nsmr_cont *cmd;
1571
1572 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
1573 if (!cmd)
1574 return -ENOMEM;
1575 cmd->lrkey = hwmr->lkey;
1576 cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
1577 (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
1578 cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;
1579
1580 for (i = 0; i < pbl_cnt; i++) {
1581 cmd->pbl[i].lo =
1582 (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
1583 cmd->pbl[i].hi =
1584 upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
1585 }
1586 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1587 if (status)
1588 goto mbx_err;
1589mbx_err:
1590 kfree(cmd);
1591 return status;
1592}
1593
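/*
 * Memory regions are registered in chunks: the first REGISTER_NSMR
 * command carries up to MAX_OCRDMA_NSMR_PBL PBL addresses and any
 * remaining PBLs follow in REGISTER_NSMR_CONT commands, with the "last"
 * flag set only on the final chunk.
 */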
1594int ocrdma_reg_mr(struct ocrdma_dev *dev,
1595 struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
1596{
1597 int status;
1598 u32 last = 0;
1599 u32 cur_pbl_cnt, pbl_offset;
1600 u32 pending_pbl_cnt = hwmr->num_pbls;
1601
1602 pbl_offset = 0;
1603 cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
1604 if (cur_pbl_cnt == pending_pbl_cnt)
1605 last = 1;
1606
1607 status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
1608 cur_pbl_cnt, hwmr->pbe_size, last);
1609 if (status) {
1610 pr_err("%s() status=%d\n", __func__, status);
1611 return status;
1612 }
 1613 /* if there are no more pbls to register, then exit. */
1614 if (last)
1615 return 0;
1616
1617 while (!last) {
1618 pbl_offset += cur_pbl_cnt;
1619 pending_pbl_cnt -= cur_pbl_cnt;
1620 cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
 1621 /* if we reach the end of the pbls, then we need to set the last
 1622 * bit, indicating no more pbls to register for this memory key.
 1623 */
1624 if (cur_pbl_cnt == pending_pbl_cnt)
1625 last = 1;
1626
1627 status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
1628 pbl_offset, last);
1629 if (status)
1630 break;
1631 }
1632 if (status)
1633 pr_err("%s() err. status=%d\n", __func__, status);
1634
1635 return status;
1636}
1637
1638bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
1639{
1640 struct ocrdma_qp *tmp;
1641 bool found = false;
1642 list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
1643 if (qp == tmp) {
1644 found = true;
1645 break;
1646 }
1647 }
1648 return found;
1649}
1650
1651bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
1652{
1653 struct ocrdma_qp *tmp;
1654 bool found = false;
1655 list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
1656 if (qp == tmp) {
1657 found = true;
1658 break;
1659 }
1660 }
1661 return found;
1662}
1663
1664void ocrdma_flush_qp(struct ocrdma_qp *qp)
1665{
1666 bool found;
1667 unsigned long flags;
1668
1669 spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
1670 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1671 if (!found)
1672 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
1673 if (!qp->srq) {
1674 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1675 if (!found)
1676 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
1677 }
1678 spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
1679}
1680
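/*
 * Update the software QP state under q_lock. Returns 1 when the QP is
 * already in the requested state; on a transition to the error state the
 * QP is also queued on its CQs' flush lists via ocrdma_flush_qp().
 */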
1681int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
 1682 enum ib_qp_state *old_ib_state)
1683{
1684 unsigned long flags;
1685 int status = 0;
1686 enum ocrdma_qp_state new_state;
1687 new_state = get_ocrdma_qp_state(new_ib_state);
1688
1689 /* sync with wqe and rqe posting */
1690 spin_lock_irqsave(&qp->q_lock, flags);
1691
1692 if (old_ib_state)
1693 *old_ib_state = get_ibqp_state(qp->state);
1694 if (new_state == qp->state) {
1695 spin_unlock_irqrestore(&qp->q_lock, flags);
1696 return 1;
1697 }
1698
1699
1700 if (new_state == OCRDMA_QPS_ERR)
1701 ocrdma_flush_qp(qp);
1702
1703 qp->state = new_state;
1704
1705 spin_unlock_irqrestore(&qp->q_lock, flags);
1706 return status;
1707}
1708
1709static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
1710{
1711 u32 flags = 0;
1712 if (qp->cap_flags & OCRDMA_QP_INB_RD)
1713 flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
1714 if (qp->cap_flags & OCRDMA_QP_INB_WR)
1715 flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
1716 if (qp->cap_flags & OCRDMA_QP_MW_BIND)
1717 flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
1718 if (qp->cap_flags & OCRDMA_QP_LKEY0)
1719 flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
1720 if (qp->cap_flags & OCRDMA_QP_FAST_REG)
1721 flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
1722 return flags;
1723}
1724
1725static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
1726 struct ib_qp_init_attr *attrs,
1727 struct ocrdma_qp *qp)
1728{
1729 int status;
1730 u32 len, hw_pages, hw_page_size;
1731 dma_addr_t pa;
1732 struct ocrdma_dev *dev = qp->dev;
1733 struct pci_dev *pdev = dev->nic_info.pdev;
1734 u32 max_wqe_allocated;
1735 u32 max_sges = attrs->cap.max_send_sge;
1736
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301737 /* QP1 may exceed 127 */
1738 max_wqe_allocated = min_t(int, attrs->cap.max_send_wr + 1,
1739 dev->attr.max_wqe);
Parav Panditfe2caef2012-03-21 04:09:06 +05301740
1741 status = ocrdma_build_q_conf(&max_wqe_allocated,
1742 dev->attr.wqe_size, &hw_pages, &hw_page_size);
1743 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001744 pr_err("%s() req. max_send_wr=0x%x\n", __func__,
1745 max_wqe_allocated);
Parav Panditfe2caef2012-03-21 04:09:06 +05301746 return -EINVAL;
1747 }
1748 qp->sq.max_cnt = max_wqe_allocated;
1749 len = (hw_pages * hw_page_size);
1750
1751 qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
1752 if (!qp->sq.va)
1753		return -ENOMEM;
1754 memset(qp->sq.va, 0, len);
1755 qp->sq.len = len;
1756 qp->sq.pa = pa;
1757 qp->sq.entry_size = dev->attr.wqe_size;
1758 ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);
1759
1760 cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
1761 << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
1762 cmd->num_wq_rq_pages |= (hw_pages <<
1763 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
1764 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
1765 cmd->max_sge_send_write |= (max_sges <<
1766 OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
1767 OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
1768 cmd->max_sge_send_write |= (max_sges <<
1769 OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
1770 OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
1771 cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
1772 OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
1773 OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
1774 cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
1775 OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
1776 OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
1777 return 0;
1778}
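/*
 * Sizing sketch (illustrative): WQE depth and queue page size are
 * carried to the firmware log2-encoded, so ocrdma_build_q_conf()
 * rounds the requested depth up to a power of two and picks a page
 * count/size that holds it.  Assuming a 64-byte WQE and 256 requested
 * send WRs:
 *
 *	depth    = 256 + 1, rounded up to 512
 *	length   = 512 * 64 = 32768 bytes, split into hw_pages pages of
 *		   hw_page_size bytes
 *	MAX_WQE  field  = ilog2(512) = 9
 *	page-size field = ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
 *
 * The create-QP response reports the same fields, decoded with
 * 1 << value in ocrdma_get_create_qp_rsp().
 */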
1779
1780static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
1781 struct ib_qp_init_attr *attrs,
1782 struct ocrdma_qp *qp)
1783{
1784 int status;
1785 u32 len, hw_pages, hw_page_size;
1786 dma_addr_t pa = 0;
1787 struct ocrdma_dev *dev = qp->dev;
1788 struct pci_dev *pdev = dev->nic_info.pdev;
1789 u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
1790
1791 status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
1792 &hw_pages, &hw_page_size);
1793 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001794 pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
1795 attrs->cap.max_recv_wr + 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05301796 return status;
1797 }
1798 qp->rq.max_cnt = max_rqe_allocated;
1799 len = (hw_pages * hw_page_size);
1800
1801 qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
1802 if (!qp->rq.va)
Wei Yongjunc94e15c2013-06-23 09:07:19 +08001803 return -ENOMEM;
Parav Panditfe2caef2012-03-21 04:09:06 +05301804 memset(qp->rq.va, 0, len);
1805 qp->rq.pa = pa;
1806 qp->rq.len = len;
1807 qp->rq.entry_size = dev->attr.rqe_size;
1808
1809 ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
1810 cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
1811 OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
1812 cmd->num_wq_rq_pages |=
1813 (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
1814 OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
1815 cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
1816 OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
1817 OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
1818 cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
1819 OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
1820 OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
1821 cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
1822 OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
1823 OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
1824 return 0;
1825}
1826
1827static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
1828 struct ocrdma_pd *pd,
1829 struct ocrdma_qp *qp,
1830 u8 enable_dpp_cq, u16 dpp_cq_id)
1831{
1832 pd->num_dpp_qp--;
1833 qp->dpp_enabled = true;
1834 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
1835 if (!enable_dpp_cq)
1836 return;
1838 cmd->dpp_credits_cqid = dpp_cq_id;
1839 cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
1840 OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
1841}
1842
1843static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
1844 struct ocrdma_qp *qp)
1845{
1846 struct ocrdma_dev *dev = qp->dev;
1847 struct pci_dev *pdev = dev->nic_info.pdev;
1848 dma_addr_t pa = 0;
1849 int ird_page_size = dev->attr.ird_page_size;
1850 int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301851 struct ocrdma_hdr_wqe *rqe;
1852	int i;
Parav Panditfe2caef2012-03-21 04:09:06 +05301853
1854 if (dev->attr.ird == 0)
1855 return 0;
1856
1857 qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
1858 &pa, GFP_KERNEL);
1859 if (!qp->ird_q_va)
1860 return -ENOMEM;
1861 memset(qp->ird_q_va, 0, ird_q_len);
1862 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
1863 pa, ird_page_size);
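	/*
	 * Pre-format the header control word of every IRD queue entry
	 * (addressing type LKEY, current and next WQE size fields set
	 * to 8) before the queue is handed to the firmware.
	 */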
1864	for (i = 0; i < ird_q_len / dev->attr.rqe_size; i++) {
1865 rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
1866 (i * dev->attr.rqe_size));
1867 rqe->cw = 0;
1868 rqe->cw |= 2;
1869 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1870 rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
1871 rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
1872 }
Parav Panditfe2caef2012-03-21 04:09:06 +05301873 return 0;
1874}
1875
1876static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
1877 struct ocrdma_qp *qp,
1878 struct ib_qp_init_attr *attrs,
1879 u16 *dpp_offset, u16 *dpp_credit_lmt)
1880{
1881 u32 max_wqe_allocated, max_rqe_allocated;
1882 qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
1883 qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
1884 qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
1885 qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
1886 qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
1887 qp->dpp_enabled = false;
1888 if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
1889 qp->dpp_enabled = true;
1890 *dpp_credit_lmt = (rsp->dpp_response &
1891 OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
1892 OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
1893 *dpp_offset = (rsp->dpp_response &
1894 OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
1895 OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
1896 }
1897 max_wqe_allocated =
1898 rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
1899 max_wqe_allocated = 1 << max_wqe_allocated;
1900 max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
1901
Parav Panditfe2caef2012-03-21 04:09:06 +05301902 qp->sq.max_cnt = max_wqe_allocated;
1903 qp->sq.max_wqe_idx = max_wqe_allocated - 1;
1904
1905 if (!attrs->srq) {
1906 qp->rq.max_cnt = max_rqe_allocated;
1907 qp->rq.max_wqe_idx = max_rqe_allocated - 1;
Parav Panditfe2caef2012-03-21 04:09:06 +05301908 }
1909}
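/*
 * Decode sketch (illustrative): the firmware reports the actually
 * allocated queue depths log2-encoded, so an encoded value of 9 means
 * 1 << 9 = 512 entries, and max_wqe_idx = depth - 1 can serve as a
 * wrap-around mask because the depth is always a power of two.  When
 * direct packet push (DPP) was granted, the doorbell page offset and
 * credit limit are passed back to the caller as well.
 */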
1910
1911int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
1912 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
1913 u16 *dpp_credit_lmt)
1914{
1915 int status = -ENOMEM;
1916 u32 flags = 0;
1917 struct ocrdma_dev *dev = qp->dev;
1918 struct ocrdma_pd *pd = qp->pd;
1919 struct pci_dev *pdev = dev->nic_info.pdev;
1920 struct ocrdma_cq *cq;
1921 struct ocrdma_create_qp_req *cmd;
1922 struct ocrdma_create_qp_rsp *rsp;
1923 int qptype;
1924
1925 switch (attrs->qp_type) {
1926 case IB_QPT_GSI:
1927 qptype = OCRDMA_QPT_GSI;
1928 break;
1929 case IB_QPT_RC:
1930 qptype = OCRDMA_QPT_RC;
1931 break;
1932 case IB_QPT_UD:
1933 qptype = OCRDMA_QPT_UD;
1934 break;
1935 default:
1936 return -EINVAL;
1937	}
1938
1939 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
1940 if (!cmd)
1941 return status;
1942 cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
1943 OCRDMA_CREATE_QP_REQ_QPT_MASK;
1944 status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
1945 if (status)
1946 goto sq_err;
1947
1948 if (attrs->srq) {
1949 struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
1950 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
1951 cmd->rq_addr[0].lo = srq->id;
1952 qp->srq = srq;
1953 } else {
1954 status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
1955 if (status)
1956 goto rq_err;
1957 }
1958
1959 status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
1960 if (status)
1961 goto mbx_err;
1962
1963 cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
1964 OCRDMA_CREATE_QP_REQ_PD_ID_MASK;
1965
1966 flags = ocrdma_set_create_qp_mbx_access_flags(qp);
1967
1968 cmd->max_sge_recv_flags |= flags;
1969 cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
1970 OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
1971 OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
1972 cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
1973 OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
1974 OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
1975 cq = get_ocrdma_cq(attrs->send_cq);
1976 cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
1977 OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
1978 qp->sq_cq = cq;
1979 cq = get_ocrdma_cq(attrs->recv_cq);
1980 cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
1981 OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
1982 qp->rq_cq = cq;
1983
1984 if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301985 (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301986 ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
1987 dpp_cq_id);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301988 }
Parav Panditfe2caef2012-03-21 04:09:06 +05301989
1990 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1991 if (status)
1992 goto mbx_err;
1993 rsp = (struct ocrdma_create_qp_rsp *)cmd;
1994 ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
1995 qp->state = OCRDMA_QPS_RST;
1996 kfree(cmd);
1997 return 0;
1998mbx_err:
1999 if (qp->rq.va)
2000 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2001rq_err:
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002002 pr_err("%s(%d) rq_err\n", __func__, dev->id);
Parav Panditfe2caef2012-03-21 04:09:06 +05302003 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2004sq_err:
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002005 pr_err("%s(%d) sq_err\n", __func__, dev->id);
Parav Panditfe2caef2012-03-21 04:09:06 +05302006 kfree(cmd);
2007 return status;
2008}
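/*
 * The error unwind above runs in reverse order of allocation: mbx_err
 * frees the RQ buffer (when the QP is not SRQ-backed), rq_err frees
 * the SQ buffer, and sq_err releases the mailbox command.  A minimal
 * caller sketch (illustrative only; dpp_off and dpp_cred are local to
 * the example):
 *
 *	u16 dpp_off = 0, dpp_cred = 0;
 *
 *	status = ocrdma_mbx_create_qp(qp, attrs, 0, 0, &dpp_off, &dpp_cred);
 *	if (status)
 *		return status;
 *	...
 *	ocrdma_mbx_destroy_qp(dev, qp);	    // releases the SQ/RQ DMA buffers
 */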
2009
2010int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2011 struct ocrdma_qp_params *param)
2012{
2013 int status = -ENOMEM;
2014 struct ocrdma_query_qp *cmd;
2015 struct ocrdma_query_qp_rsp *rsp;
2016
2017 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
2018 if (!cmd)
2019 return status;
2020 cmd->qp_id = qp->id;
2021 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2022 if (status)
2023 goto mbx_err;
2024 rsp = (struct ocrdma_query_qp_rsp *)cmd;
2025 memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
2026mbx_err:
2027 kfree(cmd);
2028 return status;
2029}
2030
2031int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
2032 u8 *mac_addr)
2033{
2034 struct in6_addr in6;
2035
2036 memcpy(&in6, dgid, sizeof in6);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302037 if (rdma_is_multicast_addr(&in6)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302038 rdma_get_mcast_mac(&in6, mac_addr);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302039 } else if (rdma_link_local_addr(&in6)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302040 rdma_get_ll_mac(&in6, mac_addr);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302041 } else {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002042 pr_err("%s() fail to resolve mac_addr.\n", __func__);
Parav Panditfe2caef2012-03-21 04:09:06 +05302043 return -EINVAL;
2044 }
2045 return 0;
2046}
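/*
 * Classification sketch (illustrative): a RoCE GID is handled as an
 * IPv6 address; multicast addresses start with 0xff and link-local
 * addresses fall in fe80::/10:
 *
 *	const u8 *p = dgid->raw;
 *	bool is_mcast  = (p[0] == 0xff);
 *	bool is_llocal = (p[0] == 0xfe) && ((p[1] & 0xc0) == 0x80);
 *
 * rdma_get_mcast_mac() derives the 33:33:xx:xx:xx:xx multicast MAC
 * from the low 32 bits of the GID, while rdma_get_ll_mac() recovers
 * the unicast MAC from the EUI-64 interface identifier.
 */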
2047
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302048static int ocrdma_set_av_params(struct ocrdma_qp *qp,
Parav Panditfe2caef2012-03-21 04:09:06 +05302049 struct ocrdma_modify_qp *cmd,
2050 struct ib_qp_attr *attrs)
2051{
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302052 int status;
Parav Panditfe2caef2012-03-21 04:09:06 +05302053 struct ib_ah_attr *ah_attr = &attrs->ah_attr;
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302054 union ib_gid sgid, zgid;
Parav Panditfe2caef2012-03-21 04:09:06 +05302055 u32 vlan_id;
2056 u8 mac_addr[6];
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302057
Parav Panditfe2caef2012-03-21 04:09:06 +05302058 if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302059 return -EINVAL;
Parav Panditfe2caef2012-03-21 04:09:06 +05302060 cmd->params.tclass_sq_psn |=
2061 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
2062 cmd->params.rnt_rc_sl_fl |=
2063 (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
2064 cmd->params.hop_lmt_rq_psn |=
2065 (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
2066 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
2067 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
2068 sizeof(cmd->params.dgid));
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302069 status = ocrdma_query_gid(&qp->dev->ibdev, 1,
Parav Panditfe2caef2012-03-21 04:09:06 +05302070 ah_attr->grh.sgid_index, &sgid);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302071 if (status)
2072 return status;
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302073
2074 memset(&zgid, 0, sizeof(zgid));
2075 if (!memcmp(&sgid, &zgid, sizeof(zgid)))
2076 return -EINVAL;
2077
Parav Panditfe2caef2012-03-21 04:09:06 +05302078 qp->sgid_idx = ah_attr->grh.sgid_index;
2079 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
2080 ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
2081 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2082 (mac_addr[2] << 16) | (mac_addr[3] << 24);
2083 /* convert them to LE format. */
2084 ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
2085 ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
2086 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
2087 vlan_id = rdma_get_vlan_id(&sgid);
2088 if (vlan_id && (vlan_id < 0x1000)) {
2089 cmd->params.vlan_dmac_b4_to_b5 |=
2090 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2091 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
2092 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302093 return 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302094}
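/*
 * Packing sketch (illustrative): the resolved 6-byte destination MAC
 * is split across two command fields, byte 0 in the low bits.  For
 * 00:11:22:33:44:55:
 *
 *	dmac_b0_to_b3      = 0x00 | (0x11 << 8) | (0x22 << 16) | (0x33 << 24)
 *	vlan_dmac_b4_to_b5 = 0x44 | (0x55 << 8), optionally OR-ed with
 *			     vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT
 */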
2095
2096static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2097 struct ocrdma_modify_qp *cmd,
2098 struct ib_qp_attr *attrs, int attr_mask,
2099 enum ib_qp_state old_qps)
2100{
2101 int status = 0;
2102 struct net_device *netdev = qp->dev->nic_info.netdev;
2103 int eth_mtu = iboe_get_mtu(netdev->mtu);
2104
2105 if (attr_mask & IB_QP_PKEY_INDEX) {
2106 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
2107 OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
2108 cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
2109 }
2110 if (attr_mask & IB_QP_QKEY) {
2111 qp->qkey = attrs->qkey;
2112 cmd->params.qkey = attrs->qkey;
2113 cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
2114 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302115 if (attr_mask & IB_QP_AV) {
2116 status = ocrdma_set_av_params(qp, cmd, attrs);
2117 if (status)
2118 return status;
2119 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302120 /* set the default mac address for UD, GSI QPs */
2121 cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
2122 (qp->dev->nic_info.mac_addr[1] << 8) |
2123 (qp->dev->nic_info.mac_addr[2] << 16) |
2124 (qp->dev->nic_info.mac_addr[3] << 24);
2125 cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
2126 (qp->dev->nic_info.mac_addr[5] << 8);
2127 }
2128 if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
2129 attrs->en_sqd_async_notify) {
2130 cmd->params.max_sge_recv_flags |=
2131 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
2132 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2133 }
2134 if (attr_mask & IB_QP_DEST_QPN) {
2135 cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
2136 OCRDMA_QP_PARAMS_DEST_QPN_MASK);
2137 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2138 }
2139 if (attr_mask & IB_QP_PATH_MTU) {
2140 if (ib_mtu_enum_to_int(eth_mtu) <
2141 ib_mtu_enum_to_int(attrs->path_mtu)) {
2142 status = -EINVAL;
2143 goto pmtu_err;
2144 }
2145 cmd->params.path_mtu_pkey_indx |=
2146 (ib_mtu_enum_to_int(attrs->path_mtu) <<
2147 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
2148 OCRDMA_QP_PARAMS_PATH_MTU_MASK;
2149 cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
2150 }
2151 if (attr_mask & IB_QP_TIMEOUT) {
2152 cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
2153 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
2154 cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
2155 }
2156 if (attr_mask & IB_QP_RETRY_CNT) {
2157 cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
2158 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
2159 OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
2160 cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
2161 }
2162 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2163 cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
2164 OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
2165 OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
2166 cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
2167 }
2168 if (attr_mask & IB_QP_RNR_RETRY) {
2169 cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
2170 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
2171 & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
2172 cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
2173 }
2174 if (attr_mask & IB_QP_SQ_PSN) {
2175 cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
2176 cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
2177 }
2178 if (attr_mask & IB_QP_RQ_PSN) {
2179 cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
2180 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
2181 }
2182 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2183 if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
2184 status = -EINVAL;
2185 goto pmtu_err;
2186 }
2187 qp->max_ord = attrs->max_rd_atomic;
2188 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
2189 }
2190 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2191 if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
2192 status = -EINVAL;
2193 goto pmtu_err;
2194 }
2195 qp->max_ird = attrs->max_dest_rd_atomic;
2196 cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
2197 }
2198 cmd->params.max_ord_ird = (qp->max_ord <<
2199 OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
2200 (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
2201pmtu_err:
2202 return status;
2203}
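/*
 * Mask sketch (illustrative): every attribute above is guarded by
 * attr_mask and mirrored by a *_VALID bit in cmd->flags, so the
 * firmware only applies what the caller asked to change.  A typical
 * RC INIT->RTR transition, for example, passes:
 *
 *	attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 *		    IB_QP_DEST_QPN | IB_QP_RQ_PSN |
 *		    IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
 */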
2204
2205int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2206 struct ib_qp_attr *attrs, int attr_mask,
2207 enum ib_qp_state old_qps)
2208{
2209 int status = -ENOMEM;
2210 struct ocrdma_modify_qp *cmd;
Parav Panditfe2caef2012-03-21 04:09:06 +05302211
2212 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
2213 if (!cmd)
2214 return status;
2215
2216 cmd->params.id = qp->id;
2217 cmd->flags = 0;
2218 if (attr_mask & IB_QP_STATE) {
2219 cmd->params.max_sge_recv_flags |=
2220 (get_ocrdma_qp_state(attrs->qp_state) <<
2221 OCRDMA_QP_PARAMS_STATE_SHIFT) &
2222 OCRDMA_QP_PARAMS_STATE_MASK;
2223 cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302224 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302225 cmd->params.max_sge_recv_flags |=
2226 (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
2227 OCRDMA_QP_PARAMS_STATE_MASK;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302228 }
2229
Parav Panditfe2caef2012-03-21 04:09:06 +05302230 status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
2231 if (status)
2232 goto mbx_err;
2233 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2234 if (status)
2235 goto mbx_err;
Roland Dreierc592c422012-04-17 01:18:28 -07002236
Parav Panditfe2caef2012-03-21 04:09:06 +05302237mbx_err:
2238 kfree(cmd);
2239 return status;
2240}
2241
2242int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
2243{
2244 int status = -ENOMEM;
2245 struct ocrdma_destroy_qp *cmd;
Parav Panditfe2caef2012-03-21 04:09:06 +05302246 struct pci_dev *pdev = dev->nic_info.pdev;
2247
2248 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
2249 if (!cmd)
2250 return status;
2251 cmd->qp_id = qp->id;
2252 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2253 if (status)
2254 goto mbx_err;
Roland Dreierc592c422012-04-17 01:18:28 -07002255
Parav Panditfe2caef2012-03-21 04:09:06 +05302256mbx_err:
2257 kfree(cmd);
2258 if (qp->sq.va)
2259 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2260 if (!qp->srq && qp->rq.va)
2261 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2262 if (qp->dpp_enabled)
2263 qp->pd->num_dpp_qp++;
2264 return status;
2265}
2266
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302267int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
Parav Panditfe2caef2012-03-21 04:09:06 +05302268 struct ib_srq_init_attr *srq_attr,
2269 struct ocrdma_pd *pd)
2270{
2271 int status = -ENOMEM;
2272 int hw_pages, hw_page_size;
2273 int len;
2274 struct ocrdma_create_srq_rsp *rsp;
2275 struct ocrdma_create_srq *cmd;
2276 dma_addr_t pa;
Parav Panditfe2caef2012-03-21 04:09:06 +05302277 struct pci_dev *pdev = dev->nic_info.pdev;
2278 u32 max_rqe_allocated;
2279
2280 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
2281 if (!cmd)
2282 return status;
2283
2284 cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
2285 max_rqe_allocated = srq_attr->attr.max_wr + 1;
2286 status = ocrdma_build_q_conf(&max_rqe_allocated,
2287 dev->attr.rqe_size,
2288 &hw_pages, &hw_page_size);
2289 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002290 pr_err("%s() req. max_wr=0x%x\n", __func__,
2291 srq_attr->attr.max_wr);
Parav Panditfe2caef2012-03-21 04:09:06 +05302292 status = -EINVAL;
2293 goto ret;
2294 }
2295 len = hw_pages * hw_page_size;
2296 srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2297 if (!srq->rq.va) {
2298 status = -ENOMEM;
2299 goto ret;
2300 }
2301 ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2302
2303 srq->rq.entry_size = dev->attr.rqe_size;
2304 srq->rq.pa = pa;
2305 srq->rq.len = len;
2306 srq->rq.max_cnt = max_rqe_allocated;
2307
2308 cmd->max_sge_rqe = ilog2(max_rqe_allocated);
2309 cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
2310 OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;
2311
2312 cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2313 << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
2314 cmd->pages_rqe_sz |= (dev->attr.rqe_size
2315 << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
2316 & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
2317 cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;
2318
2319 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2320 if (status)
2321 goto mbx_err;
2322 rsp = (struct ocrdma_create_srq_rsp *)cmd;
2323 srq->id = rsp->id;
2324 srq->rq.dbid = rsp->id;
2325 max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
2326 OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
2327 OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
2328 max_rqe_allocated = (1 << max_rqe_allocated);
2329 srq->rq.max_cnt = max_rqe_allocated;
2330 srq->rq.max_wqe_idx = max_rqe_allocated - 1;
2331 srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
2332 OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
2333 OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
2334 goto ret;
2335mbx_err:
2336 dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
2337ret:
2338 kfree(cmd);
2339 return status;
2340}
2341
2342int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2343{
2344 int status = -ENOMEM;
2345 struct ocrdma_modify_srq *cmd;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302346 struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
2347
2348	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
2349 if (!cmd)
2350 return status;
2351 cmd->id = srq->id;
2352 cmd->limit_max_rqe |= srq_attr->srq_limit <<
2353 OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302354 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302355 kfree(cmd);
2356 return status;
2357}
2358
2359int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2360{
2361 int status = -ENOMEM;
2362 struct ocrdma_query_srq *cmd;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302363 struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
2364
2365	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
2366 if (!cmd)
2367 return status;
2368 cmd->id = srq->rq.dbid;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302369 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302370 if (status == 0) {
2371 struct ocrdma_query_srq_rsp *rsp =
2372 (struct ocrdma_query_srq_rsp *)cmd;
2373 srq_attr->max_sge =
2374 rsp->srq_lmt_max_sge &
2375 OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
2376 srq_attr->max_wr =
2377 rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
2378 srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
2379 OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
2380 }
2381 kfree(cmd);
2382 return status;
2383}
2384
2385int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
2386{
2387 int status = -ENOMEM;
2388 struct ocrdma_destroy_srq *cmd;
2389 struct pci_dev *pdev = dev->nic_info.pdev;
2390 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
2391 if (!cmd)
2392 return status;
2393 cmd->id = srq->id;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302394 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302395 if (srq->rq.va)
2396 dma_free_coherent(&pdev->dev, srq->rq.len,
2397 srq->rq.va, srq->rq.pa);
2398 kfree(cmd);
2399 return status;
2400}
2401
2402int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
2403{
2404 int i;
2405 int status = -EINVAL;
2406 struct ocrdma_av *av;
2407 unsigned long flags;
2408
2409 av = dev->av_tbl.va;
2410 spin_lock_irqsave(&dev->av_tbl.lock, flags);
2411 for (i = 0; i < dev->av_tbl.num_ah; i++) {
2412 if (av->valid == 0) {
2413 av->valid = OCRDMA_AV_VALID;
2414 ah->av = av;
2415 ah->id = i;
2416 status = 0;
2417 break;
2418 }
2419 av++;
2420 }
2421 if (i == dev->av_tbl.num_ah)
2422 status = -EAGAIN;
2423 spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
2424 return status;
2425}
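/*
 * The address-handle table is one firmware-shared array: a linear scan
 * under av_tbl.lock claims the first slot with valid == 0, and the
 * function returns -EAGAIN once all num_ah slots are in use.  Typical
 * pairing (illustrative):
 *
 *	if (ocrdma_alloc_av(dev, ah))
 *		return -EAGAIN;		// table exhausted
 *	...
 *	ocrdma_free_av(dev, ah);	// marks the slot free again
 */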
2426
2427int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
2428{
2429 unsigned long flags;
2430 spin_lock_irqsave(&dev->av_tbl.lock, flags);
2431 ah->av->valid = 0;
2432 spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
2433 return 0;
2434}
2435
2436static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
2437{
2438 int status;
2439 int irq;
2440 unsigned long flags = 0;
2441 int num_eq = 0;
2442
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302443 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302444 flags = IRQF_SHARED;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302445 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302446 num_eq = dev->nic_info.msix.num_vectors -
2447 dev->nic_info.msix.start_vector;
2448		/* a minimum of two vectors/EQs is required for RDMA to work:
2449		 * one for the control path and one for the data path.
2450 */
2451 if (num_eq < 2)
2452 return -EBUSY;
2453 }
2454
2455 status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN);
2456 if (status)
2457 return status;
2458 sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id);
2459 irq = ocrdma_get_irq(dev, &dev->meq);
2460 status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name,
2461 &dev->meq);
2462 if (status)
2463 _ocrdma_destroy_eq(dev, &dev->meq);
2464 return status;
2465}
2466
2467static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
2468{
Roland Dreierda496432012-04-16 11:32:17 -07002469 int num_eq, i, status = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302470 int irq;
2471 unsigned long flags = 0;
2472
2473 num_eq = dev->nic_info.msix.num_vectors -
2474 dev->nic_info.msix.start_vector;
2475 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
2476 num_eq = 1;
2477 flags = IRQF_SHARED;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302478 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302479 num_eq = min_t(u32, num_eq, num_online_cpus());
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302480 }
2481
2482	dev->qp_eq_tbl = kcalloc(num_eq, sizeof(struct ocrdma_eq), GFP_KERNEL);
2483 if (!dev->qp_eq_tbl)
2484 return -ENOMEM;
2485
2486 for (i = 0; i < num_eq; i++) {
2487 status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],
2488 OCRDMA_EQ_LEN);
2489 if (status) {
2490 status = -EINVAL;
2491 break;
2492 }
2493 sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d",
2494 dev->id, i);
2495 irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);
2496 status = request_irq(irq, ocrdma_irq_handler, flags,
2497 dev->qp_eq_tbl[i].irq_name,
2498 &dev->qp_eq_tbl[i]);
2499 if (status) {
2500 _ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
2501 status = -EINVAL;
2502 break;
2503 }
2504 dev->eq_cnt += 1;
2505 }
2506 /* one eq is sufficient for data path to work */
2507 if (dev->eq_cnt >= 1)
2508 return 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302509 ocrdma_destroy_qp_eqs(dev);
Parav Panditfe2caef2012-03-21 04:09:06 +05302510 return status;
2511}
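/*
 * Vector budgeting sketch (illustrative): with MSI-X the candidate EQ
 * count is the number of vectors assigned to this function, capped at
 * the number of online CPUs; with INTx a single shared IRQ drives one
 * EQ.  For example:
 *
 *	num_eq = msix.num_vectors - msix.start_vector;	// e.g. 8 - 1 = 7
 *	num_eq = min_t(u32, num_eq, num_online_cpus());	// e.g. min(7, 4) = 4
 *
 * Creation is best-effort: the device stays usable as long as at least
 * one data-path EQ and its IRQ come up.
 */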
2512
2513int ocrdma_init_hw(struct ocrdma_dev *dev)
2514{
2515 int status;
2516 /* set up control path eq */
2517 status = ocrdma_create_mq_eq(dev);
2518 if (status)
2519 return status;
2520 /* set up data path eq */
2521 status = ocrdma_create_qp_eqs(dev);
2522 if (status)
2523 goto qpeq_err;
2524 status = ocrdma_create_mq(dev);
2525 if (status)
2526 goto mq_err;
2527 status = ocrdma_mbx_query_fw_config(dev);
2528 if (status)
2529 goto conf_err;
2530 status = ocrdma_mbx_query_dev(dev);
2531 if (status)
2532 goto conf_err;
2533 status = ocrdma_mbx_query_fw_ver(dev);
2534 if (status)
2535 goto conf_err;
2536 status = ocrdma_mbx_create_ah_tbl(dev);
2537 if (status)
2538 goto conf_err;
2539 return 0;
2540
2541conf_err:
2542 ocrdma_destroy_mq(dev);
2543mq_err:
2544 ocrdma_destroy_qp_eqs(dev);
2545qpeq_err:
2546 ocrdma_destroy_eq(dev, &dev->meq);
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002547 pr_err("%s() status=%d\n", __func__, status);
Parav Panditfe2caef2012-03-21 04:09:06 +05302548 return status;
2549}
2550
2551void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
2552{
2553 ocrdma_mbx_delete_ah_tbl(dev);
2554
2555 /* cleanup the data path eqs */
2556 ocrdma_destroy_qp_eqs(dev);
2557
2558 /* cleanup the control path */
2559 ocrdma_destroy_mq(dev);
2560 ocrdma_destroy_eq(dev, &dev->meq);
2561}