1/*******************************************************************
2 * This file is part of the Emulex RoCE Device Driver for *
3 * RoCE (RDMA over Converged Ethernet) CNA Adapters. *
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *
20 * Contact Information:
21 * linux-drivers@emulex.com
22 *
23 * Emulex
24 * 3333 Susan Street
25 * Costa Mesa, CA 92626
26 *******************************************************************/
27
28#include <linux/sched.h>
29#include <linux/interrupt.h>
30#include <linux/log2.h>
31#include <linux/dma-mapping.h>
32
33#include <rdma/ib_verbs.h>
34#include <rdma/ib_user_verbs.h>
35#include <rdma/ib_addr.h>
36
37#include "ocrdma.h"
38#include "ocrdma_hw.h"
39#include "ocrdma_verbs.h"
40#include "ocrdma_ah.h"
41
42enum mbx_status {
43 OCRDMA_MBX_STATUS_FAILED = 1,
44 OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3,
45 OCRDMA_MBX_STATUS_OOR = 100,
46 OCRDMA_MBX_STATUS_INVALID_PD = 101,
47 OCRDMA_MBX_STATUS_PD_INUSE = 102,
48 OCRDMA_MBX_STATUS_INVALID_CQ = 103,
49 OCRDMA_MBX_STATUS_INVALID_QP = 104,
50 OCRDMA_MBX_STATUS_INVALID_LKEY = 105,
51 OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106,
52 OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107,
53 OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,
54 OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,
55 OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110,
56 OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,
57 OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112,
58 OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113,
59 OCRDMA_MBX_STATUS_MW_BOUND = 114,
60 OCRDMA_MBX_STATUS_INVALID_VA = 115,
61 OCRDMA_MBX_STATUS_INVALID_LENGTH = 116,
62 OCRDMA_MBX_STATUS_INVALID_FBO = 117,
63 OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118,
64 OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119,
65 OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,
66 OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,
67 OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129,
68 OCRDMA_MBX_STATUS_SRQ_ERROR = 133,
69 OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134,
70 OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135,
71 OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136,
72 OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,
73 OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,
74 OCRDMA_MBX_STATUS_QP_BOUND = 130,
75 OCRDMA_MBX_STATUS_INVALID_CHANGE = 139,
76 OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140,
77 OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,
78 OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142,
79 OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143,
80 OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144
81};
82
83enum additional_status {
84 OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
85};
86
87enum cqe_status {
88 OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1,
89 OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2,
90 OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3,
91 OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4,
92 OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5
93};
94
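/*
 * Ring accessors for the event queue (EQ), mailbox send queue (MQ) and
 * mailbox completion queue (MCQ).  Each queue is a power-of-two sized
 * ring of fixed-size entries in DMA-coherent memory, so the head/tail
 * indices below wrap with a simple mask, e.g. tail = (tail + 1) & (len - 1).
 */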
95static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
96{
97 return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
98}
99
100static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
101{
102 eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
103}
104
105static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
106{
107 struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
108 (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
109
110 if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
111 return NULL;
112 return cqe;
113}
114
115static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
116{
117 dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
118}
119
120static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
121{
122 return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
123}
124
125static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
126{
127 dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
128}
129
130static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
131{
132 return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
133}
134
135enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
136{
137 switch (qps) {
138 case OCRDMA_QPS_RST:
139 return IB_QPS_RESET;
140 case OCRDMA_QPS_INIT:
141 return IB_QPS_INIT;
142 case OCRDMA_QPS_RTR:
143 return IB_QPS_RTR;
144 case OCRDMA_QPS_RTS:
145 return IB_QPS_RTS;
146 case OCRDMA_QPS_SQD:
147 case OCRDMA_QPS_SQ_DRAINING:
148 return IB_QPS_SQD;
149 case OCRDMA_QPS_SQE:
150 return IB_QPS_SQE;
151 case OCRDMA_QPS_ERR:
152 return IB_QPS_ERR;
153 }
154 return IB_QPS_ERR;
155}
156
157static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
158{
159 switch (qps) {
160 case IB_QPS_RESET:
161 return OCRDMA_QPS_RST;
162 case IB_QPS_INIT:
163 return OCRDMA_QPS_INIT;
164 case IB_QPS_RTR:
165 return OCRDMA_QPS_RTR;
166 case IB_QPS_RTS:
167 return OCRDMA_QPS_RTS;
168 case IB_QPS_SQD:
169 return OCRDMA_QPS_SQD;
170 case IB_QPS_SQE:
171 return OCRDMA_QPS_SQE;
172 case IB_QPS_ERR:
173 return OCRDMA_QPS_ERR;
174 }
175 return OCRDMA_QPS_ERR;
176}
177
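/*
 * Translate the mailbox status (and, for generic failures, the additional
 * status) returned by the firmware into a negative errno for the caller.
 */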
178static int ocrdma_get_mbx_errno(u32 status)
179{
180 int err_num;
181 u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
182 OCRDMA_MBX_RSP_STATUS_SHIFT;
183 u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
184 OCRDMA_MBX_RSP_ASTATUS_SHIFT;
185
186 switch (mbox_status) {
187 case OCRDMA_MBX_STATUS_OOR:
188 case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
189 err_num = -EAGAIN;
190 break;
191
192 case OCRDMA_MBX_STATUS_INVALID_PD:
193 case OCRDMA_MBX_STATUS_INVALID_CQ:
194 case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
195 case OCRDMA_MBX_STATUS_INVALID_QP:
196 case OCRDMA_MBX_STATUS_INVALID_CHANGE:
197 case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
198 case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
199 case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
200 case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
201 case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
202 case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
203 case OCRDMA_MBX_STATUS_INVALID_LKEY:
204 case OCRDMA_MBX_STATUS_INVALID_VA:
205 case OCRDMA_MBX_STATUS_INVALID_LENGTH:
206 case OCRDMA_MBX_STATUS_INVALID_FBO:
207 case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
208 case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
209 case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
210 case OCRDMA_MBX_STATUS_SRQ_ERROR:
211 case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
212 err_num = -EINVAL;
213 break;
214
215 case OCRDMA_MBX_STATUS_PD_INUSE:
216 case OCRDMA_MBX_STATUS_QP_BOUND:
217 case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
218 case OCRDMA_MBX_STATUS_MW_BOUND:
219 err_num = -EBUSY;
220 break;
221
222 case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
223 case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
224 case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
225 case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
226 case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
227 case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
228 case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
229 case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
230 case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
231 err_num = -ENOBUFS;
232 break;
233
234 case OCRDMA_MBX_STATUS_FAILED:
235 if (add_status == OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES)
236 err_num = -EAGAIN;
237 else
238 err_num = -EFAULT;
239 break;
240 default:
241 err_num = -EFAULT;
242 }
243 return err_num;
244}
245
246static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
247{
248 int err_num = -EINVAL;
249
250 switch (cqe_status) {
251 case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
252 err_num = -EPERM;
253 break;
254 case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
255 err_num = -EINVAL;
256 break;
257 case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
258 case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
259 err_num = -EAGAIN;
260 break;
261 case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
262 err_num = -EIO;
263 break;
264 }
265 return err_num;
266}
267
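/*
 * CQ doorbell: the register value carries the CQ id (plus extension bits),
 * an optional re-arm flag, an optional solicited-only flag and the number
 * of CQEs consumed since the doorbell was last rung.
 */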
268void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
269 bool solicited, u16 cqe_popped)
270{
271 u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;
272
273 val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
274 OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);
275
276 if (armed)
277 val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
278 if (solicited)
279 val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
280 val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
281 iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
282}
283
284static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
285{
286 u32 val = 0;
287
288 val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
289 val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
290 iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
291}
292
293static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
294 bool arm, bool clear_int, u16 num_eqe)
295{
296 u32 val = 0;
297
298 val |= eq_id & OCRDMA_EQ_ID_MASK;
299 val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);
300 if (arm)
301 val |= (1 << OCRDMA_REARM_SHIFT);
302 if (clear_int)
303 val |= (1 << OCRDMA_EQ_CLR_SHIFT);
304 val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
305 val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
306 iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
307}
308
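/*
 * Mailbox command construction helpers: ocrdma_init_mch() fills the common
 * mailbox header (opcode, subsystem, timeout, payload length) and
 * ocrdma_init_emb_mqe() allocates an MQE with the request embedded in it.
 */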
309static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
310 u8 opcode, u8 subsys, u32 cmd_len)
311{
312 cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
313 cmd_hdr->timeout = 20; /* seconds */
314 cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
315}
316
317static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
318{
319 struct ocrdma_mqe *mqe;
320
321 mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
322 if (!mqe)
323 return NULL;
324 mqe->hdr.spcl_sge_cnt_emb |=
325 (OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
326 OCRDMA_MQE_HDR_EMB_MASK;
327 mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);
328
329 ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
330 mqe->hdr.pyld_len);
331 return mqe;
332}
333
334static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
335{
336 dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
337}
338
339static int ocrdma_alloc_q(struct ocrdma_dev *dev,
340 struct ocrdma_queue_info *q, u16 len, u16 entry_size)
341{
342 memset(q, 0, sizeof(*q));
343 q->len = len;
344 q->entry_size = entry_size;
345 q->size = len * entry_size;
346 q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
347 &q->dma, GFP_KERNEL);
348 if (!q->va)
349 return -ENOMEM;
350 memset(q->va, 0, q->size);
351 return 0;
352}
353
354static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
355 dma_addr_t host_pa, int hw_page_size)
356{
357 int i;
358
359 for (i = 0; i < cnt; i++) {
360 q_pa[i].lo = (u32) (host_pa & 0xffffffff);
361 q_pa[i].hi = (u32) upper_32_bits(host_pa);
362 host_pa += hw_page_size;
363 }
364}
365
366static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,
367 struct ocrdma_eq *eq)
368{
369 /* assign vector and update vector id for next EQ */
370 eq->vector = dev->nic_info.msix.start_vector;
371 dev->nic_info.msix.start_vector += 1;
372}
373
374static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)
375{
376 /* this assumes that EQs are freed in exactly the reverse
377 * order of their allocation.
378 */
379 dev->nic_info.msix.start_vector -= 1;
380}
381
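/* Destroy an MQ, CQ or EQ by issuing the matching delete command over be_roce_mcc_cmd(). */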
382static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
383 int queue_type)
384{
385 u8 opcode = 0;
386 int status;
387 struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;
388
389 switch (queue_type) {
390 case QTYPE_MCCQ:
391 opcode = OCRDMA_CMD_DELETE_MQ;
392 break;
393 case QTYPE_CQ:
394 opcode = OCRDMA_CMD_DELETE_CQ;
395 break;
396 case QTYPE_EQ:
397 opcode = OCRDMA_CMD_DELETE_EQ;
398 break;
399 default:
400 BUG();
401 }
402 memset(cmd, 0, sizeof(*cmd));
403 ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
404 cmd->id = q->id;
405
406 status = be_roce_mcc_cmd(dev->nic_info.netdev,
407 cmd, sizeof(*cmd), NULL, NULL);
408 if (!status)
409 q->created = false;
410 return status;
411}
412
413static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
414{
415 int status;
416 struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
417 struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;
418
419 memset(cmd, 0, sizeof(*cmd));
420 ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
421 sizeof(*cmd));
422 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
423 cmd->req.rsvd_version = 0;
424 else
425 cmd->req.rsvd_version = 2;
426
427 cmd->num_pages = 4;
428 cmd->valid = OCRDMA_CREATE_EQ_VALID;
429 cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;
430
431 ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
432 PAGE_SIZE_4K);
433 status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
434 NULL);
435 if (!status) {
436 eq->q.id = rsp->vector_eqid & 0xffff;
437 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
438 ocrdma_assign_eq_vect_gen2(dev, eq);
439 } else {
440 eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
441 dev->nic_info.msix.start_vector += 1;
442 }
443 eq->q.created = true;
444 }
445 return status;
446}
447
448static int ocrdma_create_eq(struct ocrdma_dev *dev,
449 struct ocrdma_eq *eq, u16 q_len)
450{
451 int status;
452
453 status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
454 sizeof(struct ocrdma_eqe));
455 if (status)
456 return status;
457
458 status = ocrdma_mbx_create_eq(dev, eq);
459 if (status)
460 goto mbx_err;
461 eq->dev = dev;
462 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
463
464 return 0;
465mbx_err:
466 ocrdma_free_q(dev, &eq->q);
467 return status;
468}
469
470static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
471{
472 int irq;
473
474 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
475 irq = dev->nic_info.pdev->irq;
476 else
477 irq = dev->nic_info.msix.vector_list[eq->vector];
478 return irq;
479}
480
481static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
482{
483 if (eq->q.created) {
484 ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
485 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
486 ocrdma_free_eq_vect_gen2(dev);
487 ocrdma_free_q(dev, &eq->q);
488 }
489}
490
491static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
492{
493 int irq;
494
495 /* disarm EQ so that no interrupts are generated while
496 * the EQ is being freed and the EQ delete is in progress.
497 */
498 ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);
499
500 irq = ocrdma_get_irq(dev, eq);
501 free_irq(irq, eq);
502 _ocrdma_destroy_eq(dev, eq);
503}
504
505static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)
506{
507 int i;
508
509 /* deallocate the data path eqs */
510 for (i = 0; i < dev->eq_cnt; i++)
511 ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
512}
513
514static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
515 struct ocrdma_queue_info *cq,
516 struct ocrdma_queue_info *eq)
517{
518 struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
519 struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
520 int status;
521
522 memset(cmd, 0, sizeof(*cmd));
523 ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
524 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
525
526 cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
527 cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
528 OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
529 cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);
530
531 cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
532 cmd->eqn = eq->id;
533 cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe);
534
535 ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
536 cq->dma, PAGE_SIZE_4K);
537 status = be_roce_mcc_cmd(dev->nic_info.netdev,
538 cmd, sizeof(*cmd), NULL, NULL);
539 if (!status) {
540 cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
541 cq->created = true;
542 }
543 return status;
544}
545
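/*
 * Encode a power-of-two queue length for the hardware ring-size field:
 * a ring of 2^n entries encodes as n + 1 (e.g. 128 -> 8), and an encoded
 * value of 16 wraps to 0.
 */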
546static u32 ocrdma_encoded_q_len(int q_len)
547{
548 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
549
550 if (len_encoded == 16)
551 len_encoded = 0;
552 return len_encoded;
553}
554
555static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
556 struct ocrdma_queue_info *mq,
557 struct ocrdma_queue_info *cq)
558{
559 int num_pages, status;
560 struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
561 struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
562 struct ocrdma_pa *pa;
563
564 memset(cmd, 0, sizeof(*cmd));
565 num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
566
567 ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
568 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
569 cmd->req.rsvd_version = 1;
570 cmd->cqid_pages = num_pages;
571 cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
572 cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
573 cmd->async_event_bitmap = Bit(20);
574 cmd->async_cqid_ringsize = cq->id;
575 cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
576 OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
577 cmd->valid = OCRDMA_CREATE_MQ_VALID;
578 pa = &cmd->pa[0];
579
580 ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
581 status = be_roce_mcc_cmd(dev->nic_info.netdev,
582 cmd, sizeof(*cmd), NULL, NULL);
583 if (!status) {
584 mq->id = rsp->id;
585 mq->created = true;
586 }
587 return status;
588}
589
590static int ocrdma_create_mq(struct ocrdma_dev *dev)
591{
592 int status;
593
594 /* Alloc completion queue for Mailbox queue */
595 status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
596 sizeof(struct ocrdma_mcqe));
597 if (status)
598 goto alloc_err;
599
600 status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);
601 if (status)
602 goto mbx_cq_free;
603
604 memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
605 init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
606 mutex_init(&dev->mqe_ctx.lock);
607
608 /* Alloc Mailbox queue */
609 status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
610 sizeof(struct ocrdma_mqe));
611 if (status)
612 goto mbx_cq_destroy;
613 status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
614 if (status)
615 goto mbx_q_free;
616 ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
617 return 0;
618
619mbx_q_free:
620 ocrdma_free_q(dev, &dev->mq.sq);
621mbx_cq_destroy:
622 ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
623mbx_cq_free:
624 ocrdma_free_q(dev, &dev->mq.cq);
625alloc_err:
626 return status;
627}
628
629static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
630{
631 struct ocrdma_queue_info *mbxq, *cq;
632
633 /* mqe_ctx lock synchronizes with any other pending cmds. */
634 mutex_lock(&dev->mqe_ctx.lock);
635 mbxq = &dev->mq.sq;
636 if (mbxq->created) {
637 ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
638 ocrdma_free_q(dev, mbxq);
639 }
640 mutex_unlock(&dev->mqe_ctx.lock);
641
642 cq = &dev->mq.cq;
643 if (cq->created) {
644 ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
645 ocrdma_free_q(dev, cq);
646 }
647}
648
649static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
650 struct ocrdma_qp *qp)
651{
652 enum ib_qp_state new_ib_qps = IB_QPS_ERR;
653 enum ib_qp_state old_ib_qps;
654
655 if (qp == NULL)
656 BUG();
657 ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
658}
659
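/*
 * Convert an asynchronous MCQE into an ib_event and deliver it to the
 * event handler registered on the affected QP, CQ, SRQ or device.
 */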
660static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
661 struct ocrdma_ae_mcqe *cqe)
662{
663 struct ocrdma_qp *qp = NULL;
664 struct ocrdma_cq *cq = NULL;
665 struct ib_event ib_evt;
666 int cq_event = 0;
667 int qp_event = 1;
668 int srq_event = 0;
669 int dev_event = 0;
670 int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
671 OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
672
673 if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
674 qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
675 if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
676 cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];
677
678 ib_evt.device = &dev->ibdev;
679
680 switch (type) {
681 case OCRDMA_CQ_ERROR:
682 ib_evt.element.cq = &cq->ibcq;
683 ib_evt.event = IB_EVENT_CQ_ERR;
684 cq_event = 1;
685 qp_event = 0;
686 break;
687 case OCRDMA_CQ_OVERRUN_ERROR:
688 ib_evt.element.cq = &cq->ibcq;
689 ib_evt.event = IB_EVENT_CQ_ERR;
690 break;
691 case OCRDMA_CQ_QPCAT_ERROR:
692 ib_evt.element.qp = &qp->ibqp;
693 ib_evt.event = IB_EVENT_QP_FATAL;
694 ocrdma_process_qpcat_error(dev, qp);
695 break;
696 case OCRDMA_QP_ACCESS_ERROR:
697 ib_evt.element.qp = &qp->ibqp;
698 ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
699 break;
700 case OCRDMA_QP_COMM_EST_EVENT:
701 ib_evt.element.qp = &qp->ibqp;
702 ib_evt.event = IB_EVENT_COMM_EST;
703 break;
704 case OCRDMA_SQ_DRAINED_EVENT:
705 ib_evt.element.qp = &qp->ibqp;
706 ib_evt.event = IB_EVENT_SQ_DRAINED;
707 break;
708 case OCRDMA_DEVICE_FATAL_EVENT:
709 ib_evt.element.port_num = 1;
710 ib_evt.event = IB_EVENT_DEVICE_FATAL;
711 qp_event = 0;
712 dev_event = 1;
713 break;
714 case OCRDMA_SRQCAT_ERROR:
715 ib_evt.element.srq = &qp->srq->ibsrq;
716 ib_evt.event = IB_EVENT_SRQ_ERR;
717 srq_event = 1;
718 qp_event = 0;
719 break;
720 case OCRDMA_SRQ_LIMIT_EVENT:
721 ib_evt.element.srq = &qp->srq->ibsrq;
722 ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
723 srq_event = 1;
724 qp_event = 0;
725 break;
726 case OCRDMA_QP_LAST_WQE_EVENT:
727 ib_evt.element.qp = &qp->ibqp;
728 ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
729 break;
730 default:
731 cq_event = 0;
732 qp_event = 0;
733 srq_event = 0;
734 dev_event = 0;
735 pr_err("%s() unknown type=0x%x\n", __func__, type);
736 break;
737 }
738
739 if (qp_event) {
740 if (qp->ibqp.event_handler)
741 qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
742 } else if (cq_event) {
743 if (cq->ibcq.event_handler)
744 cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
745 } else if (srq_event) {
746 if (qp->srq->ibsrq.event_handler)
747 qp->srq->ibsrq.event_handler(&ib_evt,
748 qp->srq->ibsrq.
749 srq_context);
750 } else if (dev_event) {
751 ib_dispatch_event(&ib_evt);
752 }
753
754}
755
756static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
757{
758 /* async CQE processing */
759 struct ocrdma_ae_mcqe *cqe = ae_cqe;
760 u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
761 OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
762
763 if (evt_code == OCRDMA_ASYNC_EVE_CODE)
764 ocrdma_dispatch_ibevent(dev, cqe);
765 else
766 pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
767 dev->id, evt_code);
768}
769
770static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
771{
772 if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
773 dev->mqe_ctx.cqe_status = (cqe->status &
774 OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
775 dev->mqe_ctx.ext_status =
776 (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
777 >> OCRDMA_MCQE_ESTATUS_SHIFT;
778 dev->mqe_ctx.cmd_done = true;
779 wake_up(&dev->mqe_ctx.cmd_wait);
780 } else
781 pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
782 __func__, cqe->tag_lo, dev->mqe_ctx.tag);
783}
784
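/*
 * Drain the mailbox CQ: asynchronous events are dispatched as IB events,
 * command completions wake the waiter in ocrdma_mbx_cmd(), and the CQ
 * doorbell is rung with the number of CQEs consumed.
 */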
785static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
786{
787 u16 cqe_popped = 0;
788 struct ocrdma_mcqe *cqe;
789
790 while (1) {
791 cqe = ocrdma_get_mcqe(dev);
792 if (cqe == NULL)
793 break;
794 ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
795 cqe_popped += 1;
796 if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
797 ocrdma_process_acqe(dev, cqe);
798 else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
799 ocrdma_process_mcqe(dev, cqe);
800 else
801 pr_err("%s() cqe->compl is not set.\n", __func__);
802 memset(cqe, 0, sizeof(struct ocrdma_mcqe));
803 ocrdma_mcq_inc_tail(dev);
804 }
805 ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
806 return 0;
807}
808
809static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
810 struct ocrdma_cq *cq)
811{
812 unsigned long flags;
813 struct ocrdma_qp *qp;
814 bool buddy_cq_found = false;
815 /* Go through the list of QPs in error state which are using this CQ
816 * and invoke their callback handlers to trigger CQE processing for
817 * error/flushed CQEs. It is rare to find more than a few entries in
818 * this list as most consumers stop after getting an error CQE.
819 * The list is traversed only once, until a matching buddy CQ is found for a QP.
820 */
821 spin_lock_irqsave(&dev->flush_q_lock, flags);
822 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
823 if (qp->srq)
824 continue;
825 /* if wq and rq share the same cq, then comp_handler
826 * is already invoked.
827 */
828 if (qp->sq_cq == qp->rq_cq)
829 continue;
830 /* if completion came on sq, rq's cq is buddy cq.
831 * if completion came on rq, sq's cq is buddy cq.
832 */
833 if (qp->sq_cq == cq)
834 cq = qp->rq_cq;
835 else
836 cq = qp->sq_cq;
837 buddy_cq_found = true;
838 break;
839 }
840 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
841 if (buddy_cq_found == false)
842 return;
843 if (cq->ibcq.comp_handler) {
844 spin_lock_irqsave(&cq->comp_handler_lock, flags);
845 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
846 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
847 }
848}
849
850static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
851{
852 unsigned long flags;
853 struct ocrdma_cq *cq;
854
855 if (cq_idx >= OCRDMA_MAX_CQ)
856 BUG();
857
858 cq = dev->cq_tbl[cq_idx];
859 if (cq == NULL) {
860 pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
861 return;
862 }
863 spin_lock_irqsave(&cq->cq_lock, flags);
864 cq->armed = false;
865 cq->solicited = false;
866 spin_unlock_irqrestore(&cq->cq_lock, flags);
867
868 ocrdma_ring_cq_db(dev, cq->id, false, false, 0);
869
870 if (cq->ibcq.comp_handler) {
871 spin_lock_irqsave(&cq->comp_handler_lock, flags);
872 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
873 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
874 }
875 ocrdma_qp_buddy_cq_handler(dev, cq);
876}
877
878static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
879{
880 /* process the MQ-CQE. */
881 if (cq_id == dev->mq.cq.id)
882 ocrdma_mq_cq_handler(dev, cq_id);
883 else
884 ocrdma_qp_cq_handler(dev, cq_id);
885}
886
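/*
 * EQ interrupt handler: pop valid EQEs, hand any CQ events to the matching
 * CQ handler, then re-arm the EQ with the number of EQEs processed.
 */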
887static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
888{
889 struct ocrdma_eq *eq = handle;
890 struct ocrdma_dev *dev = eq->dev;
891 struct ocrdma_eqe eqe;
892 struct ocrdma_eqe *ptr;
893 u16 eqe_popped = 0;
894 u16 cq_id;
895 while (1) {
896 ptr = ocrdma_get_eqe(eq);
897 eqe = *ptr;
898 ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
899 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
900 break;
901 eqe_popped += 1;
902 ptr->id_valid = 0;
903 /* check whether it is a CQE or not. */
904 if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
905 cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
906 ocrdma_cq_handler(dev, cq_id);
907 }
908 ocrdma_eq_inc_tail(eq);
909 }
910 ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
911 /* Ring EQ doorbell with num_popped to 0 to enable interrupts again. */
912 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
913 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
914 return IRQ_HANDLED;
915}
916
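/*
 * Post a mailbox command on the MQ.  The current MQ head index doubles as
 * the completion tag, which ocrdma_process_mcqe() uses to match the
 * response CQE back to the waiting command context.
 */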
917static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
918{
919 struct ocrdma_mqe *mqe;
920
921 dev->mqe_ctx.tag = dev->mq.sq.head;
922 dev->mqe_ctx.cmd_done = false;
923 mqe = ocrdma_get_mqe(dev);
924 cmd->hdr.tag_lo = dev->mq.sq.head;
925 ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
926 /* make sure descriptor is written before ringing doorbell */
927 wmb();
928 ocrdma_mq_inc_head(dev);
929 ocrdma_ring_mq_db(dev);
930}
931
932static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
933{
934 long status;
935 /* 30 sec timeout */
936 status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
937 (dev->mqe_ctx.cmd_done != false),
938 msecs_to_jiffies(30000));
939 if (status)
940 return 0;
941 else
942 return -1;
943}
944
945/* issue a mailbox command on the MQ */
946static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
947{
948 int status = 0;
949 u16 cqe_status, ext_status;
950 struct ocrdma_mqe *rsp;
951
952 mutex_lock(&dev->mqe_ctx.lock);
953 ocrdma_post_mqe(dev, mqe);
954 status = ocrdma_wait_mqe_cmpl(dev);
955 if (status)
956 goto mbx_err;
957 cqe_status = dev->mqe_ctx.cqe_status;
958 ext_status = dev->mqe_ctx.ext_status;
959 rsp = ocrdma_get_mqe_rsp(dev);
960 ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
961 if (cqe_status || ext_status) {
962 pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
963 __func__,
964 (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
965 OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
966 status = ocrdma_get_mbx_cqe_errno(cqe_status);
967 goto mbx_err;
968 }
969 if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
970 status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
971mbx_err:
972 mutex_unlock(&dev->mqe_ctx.lock);
973 return status;
974}
975
976static void ocrdma_get_attr(struct ocrdma_dev *dev,
977 struct ocrdma_dev_attr *attr,
978 struct ocrdma_mbx_query_config *rsp)
979{
980 attr->max_pd =
981 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
982 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
983 attr->max_qp =
984 (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
985 OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
986 attr->max_send_sge = ((rsp->max_write_send_sge &
987 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
988 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
989 attr->max_recv_sge = (rsp->max_write_send_sge &
990 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
991 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
992 attr->max_srq_sge = (rsp->max_srq_rqe_sge &
993 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
994 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
995 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
996 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
997 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
998 attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
999 OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
1000 OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
1001 attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
1002 OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
1003 OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
1004 attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
1005 OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
1006 OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
1007 attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
1008 OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
1009 OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
1010 attr->max_mr = rsp->max_mr;
1011 attr->max_mr_size = ~0ull;
1012 attr->max_fmr = 0;
1013 attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
1014 attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
1015 attr->max_cqe = rsp->max_cq_cqes_per_cq &
1016 OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
1017 attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
1018 OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
1019 OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
1020 OCRDMA_WQE_STRIDE;
1021 attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
1022 OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
1023 OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
1024 OCRDMA_WQE_STRIDE;
1025 attr->max_inline_data =
1026 attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
1027 sizeof(struct ocrdma_sge));
1028 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1029 attr->ird = 1;
1030 attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
1031 attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
1032 }
1033 dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
1034 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
1035 dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
1036 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
1037}
1038
1039static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
1040 struct ocrdma_fw_conf_rsp *conf)
1041{
1042 u32 fn_mode;
1043
1044 fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
1045 if (fn_mode != OCRDMA_FN_MODE_RDMA)
1046 return -EINVAL;
1047 dev->base_eqid = conf->base_eqid;
1048 dev->max_eq = conf->max_eq;
1049 dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
1050 return 0;
1051}
1052
1053/* can be issued only during init time. */
1054static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
1055{
1056 int status = -ENOMEM;
1057 struct ocrdma_mqe *cmd;
1058 struct ocrdma_fw_ver_rsp *rsp;
1059
1060 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
1061 if (!cmd)
1062 return -ENOMEM;
1063 ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1064 OCRDMA_CMD_GET_FW_VER,
1065 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1066
1067 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1068 if (status)
1069 goto mbx_err;
1070 rsp = (struct ocrdma_fw_ver_rsp *)cmd;
1071 memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
1072 memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
1073 sizeof(rsp->running_ver));
1074 ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
1075mbx_err:
1076 kfree(cmd);
1077 return status;
1078}
1079
1080/* can be issued only during init time. */
1081static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
1082{
1083 int status = -ENOMEM;
1084 struct ocrdma_mqe *cmd;
1085 struct ocrdma_fw_conf_rsp *rsp;
1086
1087 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
1088 if (!cmd)
1089 return -ENOMEM;
1090 ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1091 OCRDMA_CMD_GET_FW_CONFIG,
1092 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1093 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1094 if (status)
1095 goto mbx_err;
1096 rsp = (struct ocrdma_fw_conf_rsp *)cmd;
1097 status = ocrdma_check_fw_config(dev, rsp);
1098mbx_err:
1099 kfree(cmd);
1100 return status;
1101}
1102
1103static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
1104{
1105 int status = -ENOMEM;
1106 struct ocrdma_mbx_query_config *rsp;
1107 struct ocrdma_mqe *cmd;
1108
1109 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
1110 if (!cmd)
1111 return status;
1112 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1113 if (status)
1114 goto mbx_err;
1115 rsp = (struct ocrdma_mbx_query_config *)cmd;
1116 ocrdma_get_attr(dev, &dev->attr, rsp);
1117mbx_err:
1118 kfree(cmd);
1119 return status;
1120}
1121
1122int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1123{
1124 int status = -ENOMEM;
1125 struct ocrdma_alloc_pd *cmd;
1126 struct ocrdma_alloc_pd_rsp *rsp;
1127
1128 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
1129 if (!cmd)
1130 return status;
1131 if (pd->dpp_enabled)
1132 cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
1133 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1134 if (status)
1135 goto mbx_err;
1136 rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
1137 pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
1138 if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
1139 pd->dpp_enabled = true;
1140 pd->dpp_page = rsp->dpp_page_pdid >>
1141 OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
1142 } else {
1143 pd->dpp_enabled = false;
1144 pd->num_dpp_qp = 0;
1145 }
1146mbx_err:
1147 kfree(cmd);
1148 return status;
1149}
1150
1151int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1152{
1153 int status = -ENOMEM;
1154 struct ocrdma_dealloc_pd *cmd;
1155
1156 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
1157 if (!cmd)
1158 return status;
1159 cmd->id = pd->id;
1160 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1161 kfree(cmd);
1162 return status;
1163}
1164
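/*
 * Size a hardware queue: round the requested entry count up to a power of
 * two, pick the smallest page size whose maximum span covers it, and split
 * that span over at most OCRDMA_MAX_Q_PAGES pages.  The (possibly larger)
 * entry count actually provisioned is returned through num_entries.
 */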
1165static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
1166 int *num_pages, int *page_size)
1167{
1168 int i;
1169 int mem_size;
1170
1171 *num_entries = roundup_pow_of_two(*num_entries);
1172 mem_size = *num_entries * entry_size;
1173 /* find the lowest possible page-size multiplier */
1174 for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1175 if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
1176 break;
1177 }
1178 if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
1179 return -EINVAL;
1180 mem_size = roundup(mem_size,
1181 ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
1182 *num_pages =
1183 mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1184 *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1185 *num_entries = mem_size / entry_size;
1186 return 0;
1187}
1188
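/*
 * Create the address handle table: one PBL page whose PBEs point at the
 * pages backing an array of ocrdma_av entries, registered with the
 * firmware via OCRDMA_CMD_CREATE_AH_TBL.
 */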
1189static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
1190{
1191 int i ;
1192 int status = 0;
1193 int max_ah;
1194 struct ocrdma_create_ah_tbl *cmd;
1195 struct ocrdma_create_ah_tbl_rsp *rsp;
1196 struct pci_dev *pdev = dev->nic_info.pdev;
1197 dma_addr_t pa;
1198 struct ocrdma_pbe *pbes;
1199
1200 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
1201 if (!cmd)
1202 return status;
1203
1204 max_ah = OCRDMA_MAX_AH;
1205 dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;
1206
1207 /* number of PBEs in PBL */
1208 cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
1209 OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
1210 OCRDMA_CREATE_AH_NUM_PAGES_MASK;
1211
1212 /* page size */
1213 for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1214 if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
1215 break;
1216 }
1217 cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
1218 OCRDMA_CREATE_AH_PAGE_SIZE_MASK;
1219
1220 /* ah_entry size */
1221 cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
1222 OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
1223 OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;
1224
1225 dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
1226 &dev->av_tbl.pbl.pa,
1227 GFP_KERNEL);
1228 if (dev->av_tbl.pbl.va == NULL)
1229 goto mem_err;
1230
1231 dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
1232 &pa, GFP_KERNEL);
1233 if (dev->av_tbl.va == NULL)
1234 goto mem_err_ah;
1235 dev->av_tbl.pa = pa;
1236 dev->av_tbl.num_ah = max_ah;
1237 memset(dev->av_tbl.va, 0, dev->av_tbl.size);
1238
1239 pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
1240 for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
1241 pbes[i].pa_lo = (u32) (pa & 0xffffffff);
1242 pbes[i].pa_hi = (u32) upper_32_bits(pa);
1243 pa += PAGE_SIZE;
1244 }
1245 cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
1246 cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
1247 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1248 if (status)
1249 goto mbx_err;
1250 rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
1251 dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
1252 kfree(cmd);
1253 return 0;
1254
1255mbx_err:
1256 dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1257 dev->av_tbl.pa);
1258 dev->av_tbl.va = NULL;
1259mem_err_ah:
1260 dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1261 dev->av_tbl.pbl.pa);
1262 dev->av_tbl.pbl.va = NULL;
1263 dev->av_tbl.size = 0;
1264mem_err:
1265 kfree(cmd);
1266 return status;
1267}
1268
1269static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
1270{
1271 struct ocrdma_delete_ah_tbl *cmd;
1272 struct pci_dev *pdev = dev->nic_info.pdev;
1273
1274 if (dev->av_tbl.va == NULL)
1275 return;
1276
1277 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
1278 if (!cmd)
1279 return;
1280 cmd->ahid = dev->av_tbl.ahid;
1281
1282 ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1283 dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1284 dev->av_tbl.pa);
1285 dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1286 dev->av_tbl.pbl.pa);
1287 kfree(cmd);
1288}
1289
1290/* Multiple CQs share an EQ. This routine returns the least used
1291 * EQ to associate with a CQ. This distributes the interrupt
1292 * processing and CPU load across the associated EQs, vectors and CPUs.
1293 */
1294static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
1295{
1296 int i, selected_eq = 0, cq_cnt = 0;
1297 u16 eq_id;
1298
1299 mutex_lock(&dev->dev_lock);
1300 cq_cnt = dev->qp_eq_tbl[0].cq_cnt;
1301 eq_id = dev->qp_eq_tbl[0].q.id;
1302 /* find the EQ which has the least number of
1303 * CQs associated with it.
1304 */
1305 for (i = 0; i < dev->eq_cnt; i++) {
1306 if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
1307 cq_cnt = dev->qp_eq_tbl[i].cq_cnt;
1308 eq_id = dev->qp_eq_tbl[i].q.id;
1309 selected_eq = i;
1310 }
1311 }
1312 dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
1313 mutex_unlock(&dev->dev_lock);
1314 return eq_id;
1315}
1316
1317static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
1318{
1319 int i;
1320
1321 mutex_lock(&dev->dev_lock);
1322 for (i = 0; i < dev->eq_cnt; i++) {
1323 if (dev->qp_eq_tbl[i].q.id != eq_id)
1324 continue;
1325 dev->qp_eq_tbl[i].cq_cnt -= 1;
1326 break;
1327 }
1328 mutex_unlock(&dev->dev_lock);
1329}
1330
1331int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1332 int entries, int dpp_cq)
1333{
1334 int status = -ENOMEM; int max_hw_cqe;
1335 struct pci_dev *pdev = dev->nic_info.pdev;
1336 struct ocrdma_create_cq *cmd;
1337 struct ocrdma_create_cq_rsp *rsp;
1338 u32 hw_pages, cqe_size, page_size, cqe_count;
1339
1340 if (dpp_cq)
1341 return -EINVAL;
1342 if (entries > dev->attr.max_cqe) {
1343 pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
1344 __func__, dev->id, dev->attr.max_cqe, entries);
1345 return -EINVAL;
1346 }
1347 if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
1348 return -EINVAL;
1349
1350 if (dpp_cq) {
1351 cq->max_hw_cqe = 1;
1352 max_hw_cqe = 1;
1353 cqe_size = OCRDMA_DPP_CQE_SIZE;
1354 hw_pages = 1;
1355 } else {
1356 cq->max_hw_cqe = dev->attr.max_cqe;
1357 max_hw_cqe = dev->attr.max_cqe;
1358 cqe_size = sizeof(struct ocrdma_cqe);
1359 hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
1360 }
1361
1362 cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);
1363
1364 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
1365 if (!cmd)
1366 return -ENOMEM;
1367 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
1368 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1369 cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
1370 if (!cq->va) {
1371 status = -ENOMEM;
1372 goto mem_err;
1373 }
1374 memset(cq->va, 0, cq->len);
1375 page_size = cq->len / hw_pages;
1376 cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
1377 OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
1378 cmd->cmd.pgsz_pgcnt |= hw_pages;
1379 cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
1380
1381 cq->eqn = ocrdma_bind_eq(dev);
1382 cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
1383 cqe_count = cq->len / cqe_size;
1384 if (cqe_count > 1024) {
1385 /* Set cnt to 3 to indicate more than 1024 cq entries */
1386 cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
1387 } else {
1388 u8 count = 0;
1389 switch (cqe_count) {
1390 case 256:
1391 count = 0;
1392 break;
1393 case 512:
1394 count = 1;
1395 break;
1396 case 1024:
1397 count = 2;
1398 break;
1399 default:
1400 goto mbx_err;
1401 }
1402 cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
1403 }
1404 /* shared eq between all the consumer cqs. */
1405 cmd->cmd.eqn = cq->eqn;
1406 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1407 if (dpp_cq)
1408 cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
1409 OCRDMA_CREATE_CQ_TYPE_SHIFT;
1410 cq->phase_change = false;
1411 cmd->cmd.cqe_count = (cq->len / cqe_size);
1412 } else {
1413 cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
1414 cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
1415 cq->phase_change = true;
1416 }
1417
1418 ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
1419 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1420 if (status)
1421 goto mbx_err;
1422
1423 rsp = (struct ocrdma_create_cq_rsp *)cmd;
1424 cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
1425 kfree(cmd);
1426 return 0;
1427mbx_err:
1428 ocrdma_unbind_eq(dev, cq->eqn);
1429 dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
1430mem_err:
1431 kfree(cmd);
1432 return status;
1433}
1434
1435int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
1436{
1437 int status = -ENOMEM;
1438 struct ocrdma_destroy_cq *cmd;
1439
1440 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
1441 if (!cmd)
1442 return status;
1443 ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
1444 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1445
1446 cmd->bypass_flush_qid |=
1447 (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
1448 OCRDMA_DESTROY_CQ_QID_MASK;
1449
1450 ocrdma_unbind_eq(dev, cq->eqn);
1451 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1452 if (status)
1453 goto mbx_err;
1454 dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
1455mbx_err:
1456 kfree(cmd);
1457 return status;
1458}
1459
1460int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1461 u32 pdid, int addr_check)
1462{
1463 int status = -ENOMEM;
1464 struct ocrdma_alloc_lkey *cmd;
1465 struct ocrdma_alloc_lkey_rsp *rsp;
1466
1467 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
1468 if (!cmd)
1469 return status;
1470 cmd->pdid = pdid;
1471 cmd->pbl_sz_flags |= addr_check;
1472 cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
1473 cmd->pbl_sz_flags |=
1474 (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
1475 cmd->pbl_sz_flags |=
1476 (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
1477 cmd->pbl_sz_flags |=
1478 (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
1479 cmd->pbl_sz_flags |=
1480 (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
1481 cmd->pbl_sz_flags |=
1482 (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);
1483
1484 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1485 if (status)
1486 goto mbx_err;
1487 rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
1488 hwmr->lkey = rsp->lrkey;
1489mbx_err:
1490 kfree(cmd);
1491 return status;
1492}
1493
1494int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
1495{
1496 int status = -ENOMEM;
1497 struct ocrdma_dealloc_lkey *cmd;
1498
1499 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
1500 if (!cmd)
1501 return -ENOMEM;
1502 cmd->lkey = lkey;
1503 cmd->rsvd_frmr = fr_mr ? 1 : 0;
1504 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1505 if (status)
1506 goto mbx_err;
1507mbx_err:
1508 kfree(cmd);
1509 return status;
1510}
1511
1512static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1513 u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
1514{
1515 int status = -ENOMEM;
1516 int i;
1517 struct ocrdma_reg_nsmr *cmd;
1518 struct ocrdma_reg_nsmr_rsp *rsp;
1519
1520 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
1521 if (!cmd)
1522 return -ENOMEM;
1523 cmd->num_pbl_pdid =
1524 pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
1525
1526 cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
1527 OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
1528 cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
1529 OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
1530 cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
1531 OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
1532 cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
1533 OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
1534 cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
1535 OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
1536 cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);
1537
1538 cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
1539 cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
1540 OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
1541 cmd->totlen_low = hwmr->len;
1542 cmd->totlen_high = upper_32_bits(hwmr->len);
1543 cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
1544 cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
1545 cmd->va_loaddr = (u32) hwmr->va;
1546 cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
1547
1548 for (i = 0; i < pbl_cnt; i++) {
1549 cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
1550 cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
1551 }
1552 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1553 if (status)
1554 goto mbx_err;
1555 rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
1556 hwmr->lkey = rsp->lrkey;
1557mbx_err:
1558 kfree(cmd);
1559 return status;
1560}
1561
1562static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
1563 struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
1564 u32 pbl_offset, u32 last)
1565{
1566 int status = -ENOMEM;
1567 int i;
1568 struct ocrdma_reg_nsmr_cont *cmd;
1569
1570 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
1571 if (!cmd)
1572 return -ENOMEM;
1573 cmd->lrkey = hwmr->lkey;
1574 cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
1575 (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
1576 cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;
1577
1578 for (i = 0; i < pbl_cnt; i++) {
1579 cmd->pbl[i].lo =
1580 (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
1581 cmd->pbl[i].hi =
1582 upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
1583 }
1584 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1585 if (status)
1586 goto mbx_err;
1587mbx_err:
1588 kfree(cmd);
1589 return status;
1590}
1591
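/*
 * Register a memory region whose PBL list may not fit in one mailbox
 * command: the first chunk goes in REGISTER_NSMR and the remaining PBLs
 * follow in REGISTER_NSMR_CONT commands, with "last" set on the final one.
 */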
1592int ocrdma_reg_mr(struct ocrdma_dev *dev,
1593 struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
1594{
1595 int status;
1596 u32 last = 0;
1597 u32 cur_pbl_cnt, pbl_offset;
1598 u32 pending_pbl_cnt = hwmr->num_pbls;
1599
1600 pbl_offset = 0;
1601 cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
1602 if (cur_pbl_cnt == pending_pbl_cnt)
1603 last = 1;
1604
1605 status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
1606 cur_pbl_cnt, hwmr->pbe_size, last);
1607 if (status) {
1608 pr_err("%s() status=%d\n", __func__, status);
1609 return status;
1610 }
1611 /* if there are no more pbls to register then exit. */
1612 if (last)
1613 return 0;
1614
1615 while (!last) {
1616 pbl_offset += cur_pbl_cnt;
1617 pending_pbl_cnt -= cur_pbl_cnt;
1618 cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
1619 /* if we reach the end of the pbls, then we need to set the last
1620 * bit, indicating no more pbls to register for this memory key.
1621 */
1622 if (cur_pbl_cnt == pending_pbl_cnt)
1623 last = 1;
1624
1625 status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
1626 pbl_offset, last);
1627 if (status)
1628 break;
1629 }
1630 if (status)
1631 pr_err("%s() err. status=%d\n", __func__, status);
1632
1633 return status;
1634}
1635
1636bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
1637{
1638 struct ocrdma_qp *tmp;
1639 bool found = false;
1640 list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
1641 if (qp == tmp) {
1642 found = true;
1643 break;
1644 }
1645 }
1646 return found;
1647}
1648
1649bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
1650{
1651 struct ocrdma_qp *tmp;
1652 bool found = false;
1653 list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
1654 if (qp == tmp) {
1655 found = true;
1656 break;
1657 }
1658 }
1659 return found;
1660}
1661
1662void ocrdma_flush_qp(struct ocrdma_qp *qp)
1663{
1664 bool found;
1665 unsigned long flags;
1666
1667 spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
1668 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1669 if (!found)
1670 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
1671 if (!qp->srq) {
1672 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1673 if (!found)
1674 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
1675 }
1676 spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
1677}
1678
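/*
 * Validate and apply a QP state transition under q_lock.  Returns 1 if the
 * QP is already in the requested state, -EINVAL for an illegal transition
 * and 0 on success; moving to ERR from INIT/RTR/RTS also queues the QP for
 * CQE flushing via ocrdma_flush_qp().
 */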
1679int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
1680 enum ib_qp_state *old_ib_state)
1681{
1682 unsigned long flags;
1683 int status = 0;
1684 enum ocrdma_qp_state new_state;
1685 new_state = get_ocrdma_qp_state(new_ib_state);
1686
1687 /* sync with wqe and rqe posting */
1688 spin_lock_irqsave(&qp->q_lock, flags);
1689
1690 if (old_ib_state)
1691 *old_ib_state = get_ibqp_state(qp->state);
1692 if (new_state == qp->state) {
1693 spin_unlock_irqrestore(&qp->q_lock, flags);
1694 return 1;
1695 }
1696
1697 switch (qp->state) {
1698 case OCRDMA_QPS_RST:
1699 switch (new_state) {
1700 case OCRDMA_QPS_RST:
1701 case OCRDMA_QPS_INIT:
1702 break;
1703 default:
1704 status = -EINVAL;
1705 break;
1706 }
1707 break;
1708 case OCRDMA_QPS_INIT:
1709 /* qps: INIT->XXX */
1710 switch (new_state) {
1711 case OCRDMA_QPS_INIT:
1712 case OCRDMA_QPS_RTR:
1713 break;
1714 case OCRDMA_QPS_ERR:
1715 ocrdma_flush_qp(qp);
1716 break;
1717 default:
1718 status = -EINVAL;
1719 break;
1720 }
1721 break;
1722 case OCRDMA_QPS_RTR:
1723 /* qps: RTR->XXX */
1724 switch (new_state) {
1725 case OCRDMA_QPS_RTS:
1726 break;
1727 case OCRDMA_QPS_ERR:
1728 ocrdma_flush_qp(qp);
1729 break;
1730 default:
1731 status = -EINVAL;
1732 break;
1733 }
1734 break;
1735 case OCRDMA_QPS_RTS:
1736 /* qps: RTS->XXX */
1737 switch (new_state) {
1738 case OCRDMA_QPS_SQD:
1739 case OCRDMA_QPS_SQE:
1740 break;
1741 case OCRDMA_QPS_ERR:
1742 ocrdma_flush_qp(qp);
1743 break;
1744 default:
1745 status = -EINVAL;
1746 break;
1747 }
1748 break;
1749 case OCRDMA_QPS_SQD:
1750 /* qps: SQD->XXX */
1751 switch (new_state) {
1752 case OCRDMA_QPS_RTS:
1753 case OCRDMA_QPS_SQE:
1754 case OCRDMA_QPS_ERR:
1755 break;
1756 default:
1757 status = -EINVAL;
1758 break;
1759 }
1760 break;
1761 case OCRDMA_QPS_SQE:
1762 switch (new_state) {
1763 case OCRDMA_QPS_RTS:
1764 case OCRDMA_QPS_ERR:
1765 break;
1766 default:
1767 status = -EINVAL;
1768 break;
1769 }
1770 break;
1771 case OCRDMA_QPS_ERR:
1772 /* qps: ERR->XXX */
1773 switch (new_state) {
1774 case OCRDMA_QPS_RST:
1775 break;
1776 default:
1777 status = -EINVAL;
1778 break;
1779			}
1780 break;
1781 default:
1782 status = -EINVAL;
1783 break;
1784	}
1785 if (!status)
1786 qp->state = new_state;
1787
1788 spin_unlock_irqrestore(&qp->q_lock, flags);
1789 return status;
1790}
1791
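/* Translate the QP capability flags into the CREATE_QP mailbox enable bits. */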
1792static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
1793{
1794 u32 flags = 0;
1795 if (qp->cap_flags & OCRDMA_QP_INB_RD)
1796 flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
1797 if (qp->cap_flags & OCRDMA_QP_INB_WR)
1798 flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
1799 if (qp->cap_flags & OCRDMA_QP_MW_BIND)
1800 flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
1801 if (qp->cap_flags & OCRDMA_QP_LKEY0)
1802 flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
1803 if (qp->cap_flags & OCRDMA_QP_FAST_REG)
1804 flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
1805 return flags;
1806}
1807
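/* Size and allocate the DMA-coherent send queue and fill in the SQ related
 * fields of the CREATE_QP mailbox request.
 */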
1808static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
1809 struct ib_qp_init_attr *attrs,
1810 struct ocrdma_qp *qp)
1811{
1812 int status;
1813 u32 len, hw_pages, hw_page_size;
1814 dma_addr_t pa;
1815 struct ocrdma_dev *dev = qp->dev;
1816 struct pci_dev *pdev = dev->nic_info.pdev;
1817 u32 max_wqe_allocated;
1818 u32 max_sges = attrs->cap.max_send_sge;
1819
1820 max_wqe_allocated = attrs->cap.max_send_wr;
1821	/* need to allocate one extra wqe for the GEN1 family */
1822 if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)
1823 max_wqe_allocated += 1;
1824
1825 status = ocrdma_build_q_conf(&max_wqe_allocated,
1826 dev->attr.wqe_size, &hw_pages, &hw_page_size);
1827 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001828 pr_err("%s() req. max_send_wr=0x%x\n", __func__,
1829 max_wqe_allocated);
Parav Panditfe2caef2012-03-21 04:09:06 +05301830 return -EINVAL;
1831 }
1832 qp->sq.max_cnt = max_wqe_allocated;
1833 len = (hw_pages * hw_page_size);
1834
1835 qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
1836 if (!qp->sq.va)
1837		return -ENOMEM;
1838 memset(qp->sq.va, 0, len);
1839 qp->sq.len = len;
1840 qp->sq.pa = pa;
1841 qp->sq.entry_size = dev->attr.wqe_size;
1842 ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);
1843
1844 cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
1845 << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
1846 cmd->num_wq_rq_pages |= (hw_pages <<
1847 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
1848 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
1849 cmd->max_sge_send_write |= (max_sges <<
1850 OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
1851 OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
1852 cmd->max_sge_send_write |= (max_sges <<
1853 OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
1854 OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
1855 cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
1856 OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
1857 OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
1858 cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
1859 OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
1860 OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
1861 return 0;
1862}
1863
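/* Size and allocate the DMA-coherent receive queue and fill in the RQ related
 * fields of the CREATE_QP mailbox request.
 */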
1864static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
1865 struct ib_qp_init_attr *attrs,
1866 struct ocrdma_qp *qp)
1867{
1868 int status;
1869 u32 len, hw_pages, hw_page_size;
1870 dma_addr_t pa = 0;
1871 struct ocrdma_dev *dev = qp->dev;
1872 struct pci_dev *pdev = dev->nic_info.pdev;
1873 u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
1874
1875 status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
1876 &hw_pages, &hw_page_size);
1877 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001878 pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
1879 attrs->cap.max_recv_wr + 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05301880 return status;
1881 }
1882 qp->rq.max_cnt = max_rqe_allocated;
1883 len = (hw_pages * hw_page_size);
1884
1885 qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
1886 if (!qp->rq.va)
Wei Yongjunc94e15c2013-06-23 09:07:19 +08001887 return -ENOMEM;
Parav Panditfe2caef2012-03-21 04:09:06 +05301888 memset(qp->rq.va, 0, len);
1889 qp->rq.pa = pa;
1890 qp->rq.len = len;
1891 qp->rq.entry_size = dev->attr.rqe_size;
1892
1893 ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
1894 cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
1895 OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
1896 cmd->num_wq_rq_pages |=
1897 (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
1898 OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
1899 cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
1900 OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
1901 OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
1902 cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
1903 OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
1904 OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
1905 cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
1906 OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
1907 OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
1908 return 0;
1909}
1910
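/* Mark the QP as DPP enabled (consuming one of the PD's DPP QP slots) and,
 * when a DPP CQ is used, program its CQ id and credit limit into the
 * CREATE_QP request.
 */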
1911static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
1912 struct ocrdma_pd *pd,
1913 struct ocrdma_qp *qp,
1914 u8 enable_dpp_cq, u16 dpp_cq_id)
1915{
1916 pd->num_dpp_qp--;
1917 qp->dpp_enabled = true;
1918 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
1919 if (!enable_dpp_cq)
1920 return;
1921 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
1922 cmd->dpp_credits_cqid = dpp_cq_id;
1923 cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
1924 OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
1925}
1926
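/* Allocate the IRD queue and add its pages to the CREATE_QP request; a no-op
 * when the device reports no IRD support.
 */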
1927static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
1928 struct ocrdma_qp *qp)
1929{
1930 struct ocrdma_dev *dev = qp->dev;
1931 struct pci_dev *pdev = dev->nic_info.pdev;
1932 dma_addr_t pa = 0;
1933 int ird_page_size = dev->attr.ird_page_size;
1934 int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
1935
1936 if (dev->attr.ird == 0)
1937 return 0;
1938
1939 qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
1940 &pa, GFP_KERNEL);
1941 if (!qp->ird_q_va)
1942 return -ENOMEM;
1943 memset(qp->ird_q_va, 0, ird_q_len);
1944 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
1945 pa, ird_page_size);
1946 return 0;
1947}
1948
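/* Decode the CREATE_QP response: QP id, doorbell ids, ORD/IRD limits, DPP
 * credits/offset and the WQE/RQE counts actually allocated.
 */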
1949static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
1950 struct ocrdma_qp *qp,
1951 struct ib_qp_init_attr *attrs,
1952 u16 *dpp_offset, u16 *dpp_credit_lmt)
1953{
1954 u32 max_wqe_allocated, max_rqe_allocated;
1955 qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
1956 qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
1957 qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
1958 qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
1959 qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
1960 qp->dpp_enabled = false;
1961 if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
1962 qp->dpp_enabled = true;
1963 *dpp_credit_lmt = (rsp->dpp_response &
1964 OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
1965 OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
1966 *dpp_offset = (rsp->dpp_response &
1967 OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
1968 OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
1969 }
1970 max_wqe_allocated =
1971 rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
1972 max_wqe_allocated = 1 << max_wqe_allocated;
1973 max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
1974
Parav Panditfe2caef2012-03-21 04:09:06 +05301975 qp->sq.max_cnt = max_wqe_allocated;
1976 qp->sq.max_wqe_idx = max_wqe_allocated - 1;
1977
1978 if (!attrs->srq) {
1979 qp->rq.max_cnt = max_rqe_allocated;
1980 qp->rq.max_wqe_idx = max_rqe_allocated - 1;
Parav Panditfe2caef2012-03-21 04:09:06 +05301981 }
1982}
1983
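/* Build and issue the CREATE_QP mailbox command: set up the SQ, the RQ (or
 * SRQ id), the IRD queue, ORD/IRD limits, CQ ids and optional DPP, then
 * decode the response into the QP.
 */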
1984int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
1985 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
1986 u16 *dpp_credit_lmt)
1987{
1988 int status = -ENOMEM;
1989 u32 flags = 0;
1990 struct ocrdma_dev *dev = qp->dev;
1991 struct ocrdma_pd *pd = qp->pd;
1992 struct pci_dev *pdev = dev->nic_info.pdev;
1993 struct ocrdma_cq *cq;
1994 struct ocrdma_create_qp_req *cmd;
1995 struct ocrdma_create_qp_rsp *rsp;
1996 int qptype;
1997
1998 switch (attrs->qp_type) {
1999 case IB_QPT_GSI:
2000 qptype = OCRDMA_QPT_GSI;
2001 break;
2002 case IB_QPT_RC:
2003 qptype = OCRDMA_QPT_RC;
2004 break;
2005 case IB_QPT_UD:
2006 qptype = OCRDMA_QPT_UD;
2007 break;
2008 default:
2009 return -EINVAL;
2010	}
2011
2012 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
2013 if (!cmd)
2014 return status;
2015 cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
2016 OCRDMA_CREATE_QP_REQ_QPT_MASK;
2017 status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
2018 if (status)
2019 goto sq_err;
2020
2021 if (attrs->srq) {
2022 struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
2023 cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
2024 cmd->rq_addr[0].lo = srq->id;
2025 qp->srq = srq;
2026 } else {
2027 status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
2028 if (status)
2029 goto rq_err;
2030 }
2031
2032 status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
2033 if (status)
2034 goto mbx_err;
2035
2036 cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
2037 OCRDMA_CREATE_QP_REQ_PD_ID_MASK;
2038
2039 flags = ocrdma_set_create_qp_mbx_access_flags(qp);
2040
2041 cmd->max_sge_recv_flags |= flags;
2042 cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
2043 OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
2044 OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
2045 cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
2046 OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
2047 OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
2048 cq = get_ocrdma_cq(attrs->send_cq);
2049 cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
2050 OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
2051 qp->sq_cq = cq;
2052 cq = get_ocrdma_cq(attrs->recv_cq);
2053 cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
2054 OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
2055 qp->rq_cq = cq;
2056
2057 if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302058 (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302059 ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
2060 dpp_cq_id);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302061 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302062
2063 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2064 if (status)
2065 goto mbx_err;
2066 rsp = (struct ocrdma_create_qp_rsp *)cmd;
2067 ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
2068 qp->state = OCRDMA_QPS_RST;
2069 kfree(cmd);
2070 return 0;
2071mbx_err:
2072 if (qp->rq.va)
2073 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2074rq_err:
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002075 pr_err("%s(%d) rq_err\n", __func__, dev->id);
Parav Panditfe2caef2012-03-21 04:09:06 +05302076 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2077sq_err:
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002078 pr_err("%s(%d) sq_err\n", __func__, dev->id);
Parav Panditfe2caef2012-03-21 04:09:06 +05302079 kfree(cmd);
2080 return status;
2081}
2082
2083int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2084 struct ocrdma_qp_params *param)
2085{
2086 int status = -ENOMEM;
2087 struct ocrdma_query_qp *cmd;
2088 struct ocrdma_query_qp_rsp *rsp;
2089
2090 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
2091 if (!cmd)
2092 return status;
2093 cmd->qp_id = qp->id;
2094 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2095 if (status)
2096 goto mbx_err;
2097 rsp = (struct ocrdma_query_qp_rsp *)cmd;
2098 memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
2099mbx_err:
2100 kfree(cmd);
2101 return status;
2102}
2103
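/* Resolve the destination MAC address from a RoCE destination GID; only
 * multicast and link-local GIDs can be resolved here.
 */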
2104int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
2105 u8 *mac_addr)
2106{
2107 struct in6_addr in6;
2108
2109 memcpy(&in6, dgid, sizeof in6);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302110 if (rdma_is_multicast_addr(&in6)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302111 rdma_get_mcast_mac(&in6, mac_addr);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302112 } else if (rdma_link_local_addr(&in6)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302113 rdma_get_ll_mac(&in6, mac_addr);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302114 } else {
2115		pr_err("%s() failed to resolve mac_addr.\n", __func__);
Parav Panditfe2caef2012-03-21 04:09:06 +05302116 return -EINVAL;
2117 }
2118 return 0;
2119}
2120
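/* Fill the address-vector fields of the MODIFY_QP command (GIDs, traffic
 * class, flow label, hop limit, destination MAC and VLAN) from the AH
 * attributes; a GRH is mandatory.
 */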
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302121static int ocrdma_set_av_params(struct ocrdma_qp *qp,
Parav Panditfe2caef2012-03-21 04:09:06 +05302122 struct ocrdma_modify_qp *cmd,
2123 struct ib_qp_attr *attrs)
2124{
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302125 int status;
Parav Panditfe2caef2012-03-21 04:09:06 +05302126 struct ib_ah_attr *ah_attr = &attrs->ah_attr;
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302127 union ib_gid sgid, zgid;
Parav Panditfe2caef2012-03-21 04:09:06 +05302128 u32 vlan_id;
2129 u8 mac_addr[6];
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302130
Parav Panditfe2caef2012-03-21 04:09:06 +05302131 if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302132 return -EINVAL;
Parav Panditfe2caef2012-03-21 04:09:06 +05302133 cmd->params.tclass_sq_psn |=
2134 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
2135 cmd->params.rnt_rc_sl_fl |=
2136 (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
2137 cmd->params.hop_lmt_rq_psn |=
2138 (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
2139 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
2140 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
2141 sizeof(cmd->params.dgid));
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302142 status = ocrdma_query_gid(&qp->dev->ibdev, 1,
Parav Panditfe2caef2012-03-21 04:09:06 +05302143 ah_attr->grh.sgid_index, &sgid);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302144 if (status)
2145 return status;
Naresh Gottumukkala9c587262013-08-07 12:52:34 +05302146
2147 memset(&zgid, 0, sizeof(zgid));
2148 if (!memcmp(&sgid, &zgid, sizeof(zgid)))
2149 return -EINVAL;
2150
Parav Panditfe2caef2012-03-21 04:09:06 +05302151 qp->sgid_idx = ah_attr->grh.sgid_index;
2152 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
2153 ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
2154 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2155 (mac_addr[2] << 16) | (mac_addr[3] << 24);
2156 /* convert them to LE format. */
2157 ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
2158 ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
2159 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
2160 vlan_id = rdma_get_vlan_id(&sgid);
2161 if (vlan_id && (vlan_id < 0x1000)) {
2162 cmd->params.vlan_dmac_b4_to_b5 |=
2163 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2164 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
2165 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302166 return 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302167}
2168
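/* Translate the ib_qp_attr fields selected by attr_mask into MODIFY_QP
 * command parameters and their validity flags.
 */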
2169static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2170 struct ocrdma_modify_qp *cmd,
2171 struct ib_qp_attr *attrs, int attr_mask,
2172 enum ib_qp_state old_qps)
2173{
2174 int status = 0;
2175 struct net_device *netdev = qp->dev->nic_info.netdev;
2176 int eth_mtu = iboe_get_mtu(netdev->mtu);
2177
2178 if (attr_mask & IB_QP_PKEY_INDEX) {
2179 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
2180 OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
2181 cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
2182 }
2183 if (attr_mask & IB_QP_QKEY) {
2184 qp->qkey = attrs->qkey;
2185 cmd->params.qkey = attrs->qkey;
2186 cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
2187 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302188 if (attr_mask & IB_QP_AV) {
2189 status = ocrdma_set_av_params(qp, cmd, attrs);
2190 if (status)
2191 return status;
2192 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302193 /* set the default mac address for UD, GSI QPs */
2194 cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
2195 (qp->dev->nic_info.mac_addr[1] << 8) |
2196 (qp->dev->nic_info.mac_addr[2] << 16) |
2197 (qp->dev->nic_info.mac_addr[3] << 24);
2198 cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
2199 (qp->dev->nic_info.mac_addr[5] << 8);
2200 }
2201 if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
2202 attrs->en_sqd_async_notify) {
2203 cmd->params.max_sge_recv_flags |=
2204 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
2205 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2206 }
2207 if (attr_mask & IB_QP_DEST_QPN) {
2208 cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
2209 OCRDMA_QP_PARAMS_DEST_QPN_MASK);
2210 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2211 }
2212 if (attr_mask & IB_QP_PATH_MTU) {
2213 if (ib_mtu_enum_to_int(eth_mtu) <
2214 ib_mtu_enum_to_int(attrs->path_mtu)) {
2215 status = -EINVAL;
2216 goto pmtu_err;
2217 }
2218 cmd->params.path_mtu_pkey_indx |=
2219 (ib_mtu_enum_to_int(attrs->path_mtu) <<
2220 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
2221 OCRDMA_QP_PARAMS_PATH_MTU_MASK;
2222 cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
2223 }
2224 if (attr_mask & IB_QP_TIMEOUT) {
2225 cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
2226 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
2227 cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
2228 }
2229 if (attr_mask & IB_QP_RETRY_CNT) {
2230 cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
2231 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
2232 OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
2233 cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
2234 }
2235 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2236 cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
2237 OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
2238 OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
2239 cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
2240 }
2241 if (attr_mask & IB_QP_RNR_RETRY) {
2242 cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
2243 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
2244 & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
2245 cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
2246 }
2247 if (attr_mask & IB_QP_SQ_PSN) {
2248 cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
2249 cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
2250 }
2251 if (attr_mask & IB_QP_RQ_PSN) {
2252 cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
2253 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
2254 }
2255 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2256 if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
2257 status = -EINVAL;
2258 goto pmtu_err;
2259 }
2260 qp->max_ord = attrs->max_rd_atomic;
2261 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
2262 }
2263 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2264 if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
2265 status = -EINVAL;
2266 goto pmtu_err;
2267 }
2268 qp->max_ird = attrs->max_dest_rd_atomic;
2269 cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
2270 }
2271 cmd->params.max_ord_ird = (qp->max_ord <<
2272 OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
2273 (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
2274pmtu_err:
2275 return status;
2276}
2277
2278int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2279 struct ib_qp_attr *attrs, int attr_mask,
2280 enum ib_qp_state old_qps)
2281{
2282 int status = -ENOMEM;
2283 struct ocrdma_modify_qp *cmd;
Parav Panditfe2caef2012-03-21 04:09:06 +05302284
2285 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
2286 if (!cmd)
2287 return status;
2288
2289 cmd->params.id = qp->id;
2290 cmd->flags = 0;
2291 if (attr_mask & IB_QP_STATE) {
2292 cmd->params.max_sge_recv_flags |=
2293 (get_ocrdma_qp_state(attrs->qp_state) <<
2294 OCRDMA_QP_PARAMS_STATE_SHIFT) &
2295 OCRDMA_QP_PARAMS_STATE_MASK;
2296 cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302297 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302298 cmd->params.max_sge_recv_flags |=
2299 (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
2300 OCRDMA_QP_PARAMS_STATE_MASK;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302301 }
2302
Parav Panditfe2caef2012-03-21 04:09:06 +05302303 status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
2304 if (status)
2305 goto mbx_err;
2306 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2307 if (status)
2308 goto mbx_err;
Roland Dreierc592c422012-04-17 01:18:28 -07002309
Parav Panditfe2caef2012-03-21 04:09:06 +05302310mbx_err:
2311 kfree(cmd);
2312 return status;
2313}
2314
2315int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
2316{
2317 int status = -ENOMEM;
2318 struct ocrdma_destroy_qp *cmd;
Parav Panditfe2caef2012-03-21 04:09:06 +05302319 struct pci_dev *pdev = dev->nic_info.pdev;
2320
2321 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
2322 if (!cmd)
2323 return status;
2324 cmd->qp_id = qp->id;
2325 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2326 if (status)
2327 goto mbx_err;
Roland Dreierc592c422012-04-17 01:18:28 -07002328
Parav Panditfe2caef2012-03-21 04:09:06 +05302329mbx_err:
2330 kfree(cmd);
2331 if (qp->sq.va)
2332 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2333 if (!qp->srq && qp->rq.va)
2334 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2335 if (qp->dpp_enabled)
2336 qp->pd->num_dpp_qp++;
2337 return status;
2338}
2339
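/* Allocate the SRQ receive queue, issue the CREATE_SRQ mailbox command and
 * record the queue sizes granted in the response.
 */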
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302340int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
Parav Panditfe2caef2012-03-21 04:09:06 +05302341 struct ib_srq_init_attr *srq_attr,
2342 struct ocrdma_pd *pd)
2343{
2344 int status = -ENOMEM;
2345 int hw_pages, hw_page_size;
2346 int len;
2347 struct ocrdma_create_srq_rsp *rsp;
2348 struct ocrdma_create_srq *cmd;
2349 dma_addr_t pa;
Parav Panditfe2caef2012-03-21 04:09:06 +05302350 struct pci_dev *pdev = dev->nic_info.pdev;
2351 u32 max_rqe_allocated;
2352
2353 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
2354 if (!cmd)
2355 return status;
2356
2357 cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
2358 max_rqe_allocated = srq_attr->attr.max_wr + 1;
2359 status = ocrdma_build_q_conf(&max_rqe_allocated,
2360 dev->attr.rqe_size,
2361 &hw_pages, &hw_page_size);
2362 if (status) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002363 pr_err("%s() req. max_wr=0x%x\n", __func__,
2364 srq_attr->attr.max_wr);
Parav Panditfe2caef2012-03-21 04:09:06 +05302365 status = -EINVAL;
2366 goto ret;
2367 }
2368 len = hw_pages * hw_page_size;
2369 srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2370 if (!srq->rq.va) {
2371 status = -ENOMEM;
2372 goto ret;
2373 }
2374 ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2375
2376 srq->rq.entry_size = dev->attr.rqe_size;
2377 srq->rq.pa = pa;
2378 srq->rq.len = len;
2379 srq->rq.max_cnt = max_rqe_allocated;
2380
2381 cmd->max_sge_rqe = ilog2(max_rqe_allocated);
2382 cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
2383 OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;
2384
2385 cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2386 << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
2387 cmd->pages_rqe_sz |= (dev->attr.rqe_size
2388 << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
2389 & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
2390 cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;
2391
2392 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2393 if (status)
2394 goto mbx_err;
2395 rsp = (struct ocrdma_create_srq_rsp *)cmd;
2396 srq->id = rsp->id;
2397 srq->rq.dbid = rsp->id;
2398 max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
2399 OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
2400 OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
2401 max_rqe_allocated = (1 << max_rqe_allocated);
2402 srq->rq.max_cnt = max_rqe_allocated;
2403 srq->rq.max_wqe_idx = max_rqe_allocated - 1;
2404 srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
2405 OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
2406 OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
2407 goto ret;
2408mbx_err:
2409 dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
2410ret:
2411 kfree(cmd);
2412 return status;
2413}
2414
2415int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2416{
2417 int status = -ENOMEM;
2418 struct ocrdma_modify_srq *cmd;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302419 struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
2420
2421	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
2422 if (!cmd)
2423 return status;
2424 cmd->id = srq->id;
2425 cmd->limit_max_rqe |= srq_attr->srq_limit <<
2426 OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302427 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302428 kfree(cmd);
2429 return status;
2430}
2431
2432int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2433{
2434 int status = -ENOMEM;
2435 struct ocrdma_query_srq *cmd;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302436 struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
2437
2438	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
2439 if (!cmd)
2440 return status;
2441 cmd->id = srq->rq.dbid;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302442 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302443 if (status == 0) {
2444 struct ocrdma_query_srq_rsp *rsp =
2445 (struct ocrdma_query_srq_rsp *)cmd;
2446 srq_attr->max_sge =
2447 rsp->srq_lmt_max_sge &
2448 OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
2449 srq_attr->max_wr =
2450 rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
2451 srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
2452 OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
2453 }
2454 kfree(cmd);
2455 return status;
2456}
2457
2458int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
2459{
2460 int status = -ENOMEM;
2461 struct ocrdma_destroy_srq *cmd;
2462 struct pci_dev *pdev = dev->nic_info.pdev;
2463 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
2464 if (!cmd)
2465 return status;
2466 cmd->id = srq->id;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302467 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
Parav Panditfe2caef2012-03-21 04:09:06 +05302468 if (srq->rq.va)
2469 dma_free_coherent(&pdev->dev, srq->rq.len,
2470 srq->rq.va, srq->rq.pa);
2471 kfree(cmd);
2472 return status;
2473}
2474
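/* Claim a free entry in the device's address-vector table for this AH;
 * returns -EAGAIN when the table is full.
 */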
2475int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
2476{
2477 int i;
2478 int status = -EINVAL;
2479 struct ocrdma_av *av;
2480 unsigned long flags;
2481
2482 av = dev->av_tbl.va;
2483 spin_lock_irqsave(&dev->av_tbl.lock, flags);
2484 for (i = 0; i < dev->av_tbl.num_ah; i++) {
2485 if (av->valid == 0) {
2486 av->valid = OCRDMA_AV_VALID;
2487 ah->av = av;
2488 ah->id = i;
2489 status = 0;
2490 break;
2491 }
2492 av++;
2493 }
2494 if (i == dev->av_tbl.num_ah)
2495 status = -EAGAIN;
2496 spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
2497 return status;
2498}
2499
2500int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
2501{
2502 unsigned long flags;
2503 spin_lock_irqsave(&dev->av_tbl.lock, flags);
2504 ah->av->valid = 0;
2505 spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
2506 return 0;
2507}
2508
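/* Create the control-path (mailbox) EQ and request its interrupt. */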
2509static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
2510{
2511 int status;
2512 int irq;
2513 unsigned long flags = 0;
2514 int num_eq = 0;
2515
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302516 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302517 flags = IRQF_SHARED;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302518 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302519 num_eq = dev->nic_info.msix.num_vectors -
2520 dev->nic_info.msix.start_vector;
2521		/* a minimum of two vectors/EQs is required for RDMA to work:
2522		 * one for the control path and one for the data path.
2523		 */
2524 if (num_eq < 2)
2525 return -EBUSY;
2526 }
2527
2528 status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN);
2529 if (status)
2530 return status;
2531 sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id);
2532 irq = ocrdma_get_irq(dev, &dev->meq);
2533 status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name,
2534 &dev->meq);
2535 if (status)
2536 _ocrdma_destroy_eq(dev, &dev->meq);
2537 return status;
2538}
2539
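/* Create one data-path EQ per available MSI-X vector (capped at the number
 * of online CPUs; a single shared EQ in INTx mode) and request their IRQs.
 */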
2540static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
2541{
Roland Dreierda496432012-04-16 11:32:17 -07002542 int num_eq, i, status = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302543 int irq;
2544 unsigned long flags = 0;
2545
2546 num_eq = dev->nic_info.msix.num_vectors -
2547 dev->nic_info.msix.start_vector;
2548 if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
2549 num_eq = 1;
2550 flags = IRQF_SHARED;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302551 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302552 num_eq = min_t(u32, num_eq, num_online_cpus());
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302553 }
2554
2555	dev->qp_eq_tbl = kcalloc(num_eq, sizeof(struct ocrdma_eq), GFP_KERNEL);
2556 if (!dev->qp_eq_tbl)
2557 return -ENOMEM;
2558
2559 for (i = 0; i < num_eq; i++) {
2560 status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],
2561 OCRDMA_EQ_LEN);
2562 if (status) {
2563 status = -EINVAL;
2564 break;
2565 }
2566 sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d",
2567 dev->id, i);
2568 irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);
2569 status = request_irq(irq, ocrdma_irq_handler, flags,
2570 dev->qp_eq_tbl[i].irq_name,
2571 &dev->qp_eq_tbl[i]);
2572 if (status) {
2573 _ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
2574 status = -EINVAL;
2575 break;
2576 }
2577 dev->eq_cnt += 1;
2578 }
2579 /* one eq is sufficient for data path to work */
2580 if (dev->eq_cnt >= 1)
2581 return 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302582 ocrdma_destroy_qp_eqs(dev);
Parav Panditfe2caef2012-03-21 04:09:06 +05302583 return status;
2584}
2585
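/* Bring up the control and data path of the device: EQs, mailbox queue,
 * firmware and device queries, and the AH table.
 */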
2586int ocrdma_init_hw(struct ocrdma_dev *dev)
2587{
2588 int status;
2589 /* set up control path eq */
2590 status = ocrdma_create_mq_eq(dev);
2591 if (status)
2592 return status;
2593 /* set up data path eq */
2594 status = ocrdma_create_qp_eqs(dev);
2595 if (status)
2596 goto qpeq_err;
2597 status = ocrdma_create_mq(dev);
2598 if (status)
2599 goto mq_err;
2600 status = ocrdma_mbx_query_fw_config(dev);
2601 if (status)
2602 goto conf_err;
2603 status = ocrdma_mbx_query_dev(dev);
2604 if (status)
2605 goto conf_err;
2606 status = ocrdma_mbx_query_fw_ver(dev);
2607 if (status)
2608 goto conf_err;
2609 status = ocrdma_mbx_create_ah_tbl(dev);
2610 if (status)
2611 goto conf_err;
2612 return 0;
2613
2614conf_err:
2615 ocrdma_destroy_mq(dev);
2616mq_err:
2617 ocrdma_destroy_qp_eqs(dev);
2618qpeq_err:
2619 ocrdma_destroy_eq(dev, &dev->meq);
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002620 pr_err("%s() status=%d\n", __func__, status);
Parav Panditfe2caef2012-03-21 04:09:06 +05302621 return status;
2622}
2623
2624void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
2625{
2626 ocrdma_mbx_delete_ah_tbl(dev);
2627
2628 /* cleanup the data path eqs */
2629 ocrdma_destroy_qp_eqs(dev);
2630
2631 /* cleanup the control path */
2632 ocrdma_destroy_mq(dev);
2633 ocrdma_destroy_eq(dev, &dev->meq);
2634}