/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"

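/*
 * RoCE exposes a single fixed P-Key table entry, so any valid index
 * simply reports the default P-Key (0xffff).
 */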
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
		     int index, union ib_gid *sgid)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	memset(sgid, 0, sizeof(*sgid));
	if (index >= OCRDMA_MAX_SGID)
		return -EINVAL;

	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

	return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	memset(attr, 0, sizeof(*attr));
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = ~0ull;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = 0;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = dev->attr.max_qp;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
					IB_DEVICE_RC_RNR_NAK_GEN |
					IB_DEVICE_SHUTDOWN_PORT |
					IB_DEVICE_SYS_IMAGE_GUID |
					IB_DEVICE_LOCAL_DMA_LKEY;
	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
	attr->max_sge_rd = 0;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = 0;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = (dev->attr.max_qp - 1);
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = 0;
	attr->max_pkeys = 1;
	return 0;
}

int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
		       dev->id, port);
		return -EINVAL;
	}
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	props->active_width = IB_WIDTH_1X;
	props->active_speed = 4;
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
		return -EINVAL;
	}
	return 0;
}

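/*
 * Each ucontext keeps a list of (physical address, length) keys for the
 * regions user space is allowed to mmap(). ocrdma_add_mmap() registers a
 * region, ocrdma_del_mmap() removes it, and ocrdma_search_mmap() lets
 * ocrdma_mmap() validate a user-supplied offset before remapping it.
 */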
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

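/*
 * Allocate a ucontext together with a DMA-coherent AH table; the table's
 * address and length are advertised to the user library through the
 * alloc_ucontext response and registered for a later mmap().
 */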
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return ERR_PTR(-EFAULT);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	ctx->dev = dev;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	memset(ctx->ah_tbl.va, 0, map_len);
	ctx->ah_tbl.len = map_len;

	memset(&resp, 0, sizeof(resp));
	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = ctx->ah_tbl.pa;

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;
	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return &ctx->ibucontext;

cpy_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	kfree(ctx);
	return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct pci_dev *pdev = uctx->dev->nic_info.pdev;

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return 0;
}

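/*
 * mmap() dispatch: offsets within the unmapped doorbell range are
 * remapped as device I/O, offsets within the DPP window are remapped
 * write-combined, and anything else registered on the context (CQ/QP
 * queue memory) is remapped as ordinary memory.
 */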
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = ucontext->dev;
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status = 0;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
	    (len <= dev->nic_info.db_page_size)) {
		/* doorbell mapping */
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
				   dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		/* dpp area mapping */
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		/* queue memory mapping */
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len,
					 vma->vm_page_prot);
	}
	return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * OCRDMA_DPP_PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 OCRDMA_DPP_PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	/* pd->uctx is not yet set on these paths; unmap via uctx instead */
	if (pd->dpp_enabled)
		ocrdma_del_mmap(uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(uctx, db_page_addr, db_page_size);
	return status;
}

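/*
 * For a userspace PD on a GEN2 family device, DPP is enabled and a fixed
 * budget of DPP-capable QPs (OCRDMA_PD_MAX_DPP_ENABLED_QP) is granted;
 * kernel PDs never enable DPP.
 */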
struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	int status;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);
	if (udata && context) {
		pd->dpp_enabled =
			(dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY);
		pd->num_dpp_qp =
			pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
	}
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		kfree(pd);
		return ERR_PTR(status);
	}

	if (udata && context) {
		status = ocrdma_copy_pd_uresp(pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	ocrdma_dealloc_pd(&pd->ibpd);
	return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	int status;
	u64 usr_db;

	status = ocrdma_mbx_dealloc_pd(dev, pd);
	if (pd->uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * OCRDMA_DPP_PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db,
					OCRDMA_DPP_PAGE_SIZE);
		usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
	}
	kfree(pd);
	return status;
}

static int ocrdma_alloc_lkey(struct ocrdma_mr *mr, u32 pdid, int acc,
			     u32 num_pbls, u32 addr_check)
{
	int status;
	struct ocrdma_dev *dev = mr->hwmr.dev;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->hwmr.dev = dev;
	status = ocrdma_alloc_lkey(mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}

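/*
 * Choose the smallest PBL size (a power-of-two multiple of
 * OCRDMA_MIN_HPAGE_SIZE) such that the number of PBLs needed to hold
 * num_pbes page entries stays below the device's max_num_mr_pbl limit.
 */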
static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
				mr->num_pbls, GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		memset(va, 0, dma_len);
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

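/*
 * Walk the umem chunk list and write one little-endian PBE (page address
 * split into lo/hi 32-bit words) per page, spilling over into the next
 * PBL whenever the current one fills up.
 */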
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct ib_umem_chunk *chunk;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	list_for_each_entry(chunk, &umem->chunk_list, list) {
		/* get all the dma regions from the chunk. */
		for (i = 0; i < chunk->nmap; i++) {
			pages = sg_dma_len(&chunk->page_list[i]) >> shift;
			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
				/* store the page address in pbe */
				pbe->pa_lo =
				    cpu_to_le32(sg_dma_address
						(&chunk->page_list[i]) +
						(umem->page_size * pg_cnt));
				pbe->pa_hi =
				    cpu_to_le32(upper_32_bits
						((sg_dma_address
						  (&chunk->page_list[i]) +
						  umem->page_size * pg_cnt)));
				pbe_cnt += 1;
				total_num_pbes += 1;
				pbe++;

				/* if done building pbes, issue the mbx cmd. */
				if (total_num_pbes == num_pbes)
					return;

				/* if the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
					(mr->hwmr.pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct ocrdma_pbe *)pbl_tbl->va;
					pbe_cnt = 0;
				}
			}
		}
	}
}

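/*
 * Register a user MR: pin the pages with ib_umem_get(), size and allocate
 * the PBL table, fill it with PBEs and register the translation with the
 * device via ocrdma_reg_mr().
 */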
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->hwmr.dev = dev;
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = mr->umem->page_size;
	mr->hwmr.fbo = mr->umem->offset;
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = mr->hwmr.dev;
	int status;

	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	if (mr->hwmr.fr_mr == 0)
		ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return status;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx;
	struct ocrdma_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = cq->len;
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = cq->pa;
	uresp.db_page_addr = cq->dev->nic_info.unmapped_db;
	uresp.db_page_size = cq->dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, cq->dev->id, cq->id);
		goto err;
	}
	uctx = get_ocrdma_ucontext(ib_ctx);
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else {
		ureq.dpp_cq = 0;
	}
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);
	cq->dev = dev;

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	cq->arm_needed = true;
	dev->cq_tbl[cq->id] = cq;

	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	int status;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = cq->dev;

	status = ocrdma_mbx_destroy_cq(dev, cq);

	if (cq->ucontext) {
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len);
		ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
				dev->nic_info.db_page_size);
	}
	dev->cq_tbl[cq->id] = NULL;

	kfree(cq);
	return status;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

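/*
 * Validate QP creation attributes against device limits: only GSI, RC
 * and UD QP types are supported, the WR/SGE/inline-data capacities must
 * fit the advertised attributes, only one GSI QP may exist, and consumer
 * QPs must not share the GSI QP's CQs.
 */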
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type != IB_QPT_GSI &&
	    attrs->qp_type != IB_QPT_RC &&
	    attrs->qp_type != IB_QPT_UD) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		pr_err("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

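/*
 * Report the new QP's queue layout (dbids, page addresses/sizes,
 * doorbell offsets and, when granted, the DPP credit and offset) to user
 * space, and register the SQ/RQ pages for a later mmap().
 */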
static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status = 0;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = qp->sq.len;
	uresp.sq_page_addr[0] = qp->sq.pa;
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = qp->rq.len;
		uresp.rq_page_addr[0] = qp->rq.pa;
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
		uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
			OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET;
		uresp.db_shift = (qp->id < 128) ? 24 : 16;
	} else {
		uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			((qp->id < 128) ?
			 OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET);
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

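/*
 * QP creation: validate the attributes, create the QP through the
 * mailbox (optionally with the DPP CQ requested by user space), allocate
 * wr_id tables for kernel QPs only, insert the QP into the qpn map and
 * copy the queue layout back to user space.
 */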
struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	qp->dev = dev;
	ocrdma_set_qp_init_params(qp, pd, attrs);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
				      ureq.dpp_cq_id,
				      &dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* wr_id tables of user-space QPs are managed by the library */
	if (udata == NULL) {
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps);
	/* if the new and previous states are the same, the hw doesn't need
	 * to know about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
	return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	/* synchronize with multiple contexts trying to change/retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = qp->dev;

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
	qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
	/* extract the mtu field before converting it to an enum */
	qp_attr->path_mtu =
		ocrdma_mtu_int_to_enum((params.path_mtu_pkey_indx &
					OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				       OCRDMA_QP_PARAMS_PATH_MTU_SHIFT);
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
	       sizeof(params.dgid));
	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
					 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
					     OCRDMA_QP_PARAMS_TCLASS_SHIFT;

	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	qp_attr->ah_attr.port_num = 1;
	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
			       OCRDMA_QP_PARAMS_SL_MASK) >>
			      OCRDMA_QP_PARAMS_SL_SHIFT;
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
			   OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
			     OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		   OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
mbx_err:
	return status;
}

static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
{
	int i = idx / 32;
	unsigned int mask = (1 << (idx % 32));

	if (srq->idx_bit_fields[i] & mask)
		srq->idx_bit_fields[i] &= ~mask;
	else
		srq->idx_bit_fields[i] |= mask;
}

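/*
 * The SQ/RQ hardware queues are circular buffers: head is the next entry
 * to post, tail the next to complete, and both indices wrap through
 * max_wqe_idx. The helpers below compute the free count and advance the
 * indices.
 */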
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	int free_cnt;

	if (q->head >= q->tail)
		free_cnt = (q->max_cnt - q->head) + q->tail;
	else
		free_cnt = q->tail - q->head;
	return free_cnt;
}

static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head &&
		ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head) ? 1 : 0;
}

static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}

/* discard the cqes for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQE for a given qp,
	 * mark the matching one discarded by clearing qpn.
	 * ring the doorbell in the poll_cq() as
	 * we don't complete out of order cqe.
	 */

	cur_getp = cq->getp;
	/* find up to where we reap the cq. */
	stop_getp = cur_getp;
	do {
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		/* if (a) done reaping whole hw cq, or
		 * (b) qp_xq becomes empty,
		 * then exit
		 */
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* if previously discarded cqe found, skip that too. */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		/* mark cqe discarded so that it is not picked up later
		 * in the poll_cq().
		 */
		discard_cnt += 1;
		cqe->cmn.qpn = 0;
		if (is_cqe_for_sq(cqe)) {
			ocrdma_hwq_inc_tail(&qp->sq);
		} else {
			if (qp->srq) {
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, cur_getp);
				spin_unlock_irqrestore(&qp->srq->q_lock,
						       flags);
			} else {
				ocrdma_hwq_inc_tail(&qp->rq);
			}
		}
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = qp->dev;

	/* sync with any active CQ poll */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

int ocrdma_destroy_qp(struct ib_qp *ibqp)
{
	int status;
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask = IB_QP_STATE;
	unsigned long flags;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	attrs.qp_state = IB_QPS_ERR;
	pd = qp->pd;

	/* change the QP state to ERROR */
	_ocrdma_modify_qp(ibqp, &attrs, attr_mask);

	/* ensure that CQEs for a newly created QP (whose id may be the same
	 * as that of the QP just destroyed) don't get discarded until the
	 * old CQEs are discarded.
	 */
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_destroy_qp(dev, qp);

	/*
	 * acquire CQ lock while destroy is in progress, in order to
	 * protect against processing in-flight CQEs for this QP.
	 */
	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
		spin_lock(&qp->rq_cq->cq_lock);

	ocrdma_del_qpn_map(dev, qp);

	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
		spin_unlock(&qp->rq_cq->cq_lock);
	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);

	if (!pd->uctx) {
		ocrdma_discard_cqes(qp, qp->sq_cq);
		ocrdma_discard_cqes(qp, qp->rq_cq);
	}
	mutex_unlock(&dev->dev_lock);

	if (pd->uctx) {
		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
		if (!qp->srq)
			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
	}

	ocrdma_del_flush_qp(qp);

	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	return status;
}

static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
{
	int status;
	struct ocrdma_create_srq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.rq_dbid = srq->rq.dbid;
	uresp.num_rq_pages = 1;
	uresp.rq_page_addr[0] = srq->rq.pa;
	uresp.rq_page_size = srq->rq.len;
	uresp.db_page_addr = srq->dev->nic_info.unmapped_db +
		(srq->pd->id * srq->dev->nic_info.db_page_size);
	uresp.db_page_size = srq->dev->nic_info.db_page_size;
	uresp.num_rqe_allocated = srq->rq.max_cnt;
	if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
		uresp.db_shift = 24;
	} else {
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status)
		return status;
	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
				 uresp.rq_page_size);
	return status;
}

struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_srq *srq;

	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
		return ERR_PTR(-EINVAL);
	if (init_attr->attr.max_wr > dev->attr.max_rqe)
		return ERR_PTR(-EINVAL);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(status);

	spin_lock_init(&srq->q_lock);
	srq->dev = dev;
	srq->pd = pd;
	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
	status = ocrdma_mbx_create_srq(srq, init_attr, pd);
	if (status)
		goto err;

	if (udata == NULL) {
		srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
					     GFP_KERNEL);
		if (srq->rqe_wr_id_tbl == NULL)
			goto arm_err;

		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
		    (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
		if (srq->idx_bit_fields == NULL)
			goto arm_err;
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}

	if (init_attr->attr.srq_limit) {
		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
		if (status)
			goto arm_err;
	}

	if (udata) {
		status = ocrdma_copy_srq_uresp(srq, udata);
		if (status)
			goto arm_err;
	}

	return &srq->ibsrq;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
err:
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	kfree(srq);
	return ERR_PTR(status);
}

int ocrdma_modify_srq(struct ib_srq *ibsrq,
		      struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}

int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	status = ocrdma_mbx_query_srq(srq, srq_attr);
	return status;
}

int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
	int status;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev;

	srq = get_ocrdma_srq(ibsrq);
	dev = srq->dev;

	status = ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);

	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq);
	return status;
}

/* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);

	ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = wr->wr.ud.remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
}

static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}

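/*
 * For IB_SEND_INLINE the payload (only sg_list[0] is supported here) is
 * copied directly into the WQE; otherwise ordinary SGEs are built. In
 * both cases the WQE size, in OCRDMA_WQE_STRIDE units, is encoded into
 * the header control word.
 */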
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    struct ib_send_wr *wr, u32 wqe_size)
{
	if (wr->send_flags & IB_SEND_INLINE) {
		if (wr->sg_list[0].length > qp->max_inline_data) {
			pr_err("%s() supported_len=0x%x,\n"
			       " unsupported len req=0x%x\n", __func__,
			       qp->max_inline_data, wr->sg_list[0].length);
			return -EINVAL;
		}
		memcpy(sge,
		       (void *)(unsigned long)wr->sg_list[0].addr,
		       wr->sg_list[0].length);
		hdr->total_len = wr->sg_list[0].length;
		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}

static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else {
		sge = (struct ocrdma_sge *)(hdr + 1);
	}

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	return status;
}

static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	if (status)
		return status;
	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
	ext_rw->lrkey = wr->wr.rdma.rkey;
	ext_rw->len = hdr->total_len;
	return 0;
}

static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
			sizeof(struct ocrdma_hdr_wqe);

	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);

	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
	ext_rw->lrkey = wr->wr.rdma.rkey;
	ext_rw->len = hdr->total_len;
}

1723static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
1724{
1725 u32 val = qp->sq.dbid | (1 << 16);
1726
1727 iowrite32(val, qp->sq_db);
1728}
1729
1730int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1731 struct ib_send_wr **bad_wr)
1732{
1733 int status = 0;
1734 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1735 struct ocrdma_hdr_wqe *hdr;
1736 unsigned long flags;
1737
1738 spin_lock_irqsave(&qp->q_lock, flags);
1739 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
1740 spin_unlock_irqrestore(&qp->q_lock, flags);
Naresh Gottumukkalaf6ddcf72013-06-10 04:42:40 +00001741 *bad_wr = wr;
Parav Panditfe2caef2012-03-21 04:09:06 +05301742 return -EINVAL;
1743 }
1744
1745 while (wr) {
1746 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
1747 wr->num_sge > qp->sq.max_sges) {
Naresh Gottumukkalaf6ddcf72013-06-10 04:42:40 +00001748 *bad_wr = wr;
Parav Panditfe2caef2012-03-21 04:09:06 +05301749 status = -ENOMEM;
1750 break;
1751 }
1752 hdr = ocrdma_hwq_head(&qp->sq);
1753 hdr->cw = 0;
1754 if (wr->send_flags & IB_SEND_SIGNALED)
1755 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
1756 if (wr->send_flags & IB_SEND_FENCE)
1757 hdr->cw |=
1758 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
1759 if (wr->send_flags & IB_SEND_SOLICITED)
1760 hdr->cw |=
1761 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
1762 hdr->total_len = 0;
1763 switch (wr->opcode) {
1764 case IB_WR_SEND_WITH_IMM:
1765 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
1766 hdr->immdt = ntohl(wr->ex.imm_data);
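			/* fall through */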
1767 case IB_WR_SEND:
1768 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
1769			status = ocrdma_build_send(qp, hdr, wr);
1770 break;
1771 case IB_WR_SEND_WITH_INV:
1772 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
1773 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
1774 hdr->lkey = wr->ex.invalidate_rkey;
1775 status = ocrdma_build_send(qp, hdr, wr);
1776 break;
1777 case IB_WR_RDMA_WRITE_WITH_IMM:
1778 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
1779 hdr->immdt = ntohl(wr->ex.imm_data);
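			/* fall through */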
1780 case IB_WR_RDMA_WRITE:
1781 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
1782 status = ocrdma_build_write(qp, hdr, wr);
1783 break;
1784 case IB_WR_RDMA_READ_WITH_INV:
1785 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
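			/* fall through */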
1786 case IB_WR_RDMA_READ:
1787 ocrdma_build_read(qp, hdr, wr);
1788 break;
1789 case IB_WR_LOCAL_INV:
1790 hdr->cw |=
1791 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
1792 hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) /
1793 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
1794 hdr->lkey = wr->ex.invalidate_rkey;
1795 break;
1796 default:
1797 status = -EINVAL;
1798 break;
1799 }
1800 if (status) {
1801 *bad_wr = wr;
1802 break;
1803 }
1804 if (wr->send_flags & IB_SEND_SIGNALED)
1805 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
1806 else
1807 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
1808 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
1809 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
1810 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
1811 /* make sure wqe is written before adapter can access it */
1812 wmb();
1813 /* inform hw to start processing it */
1814 ocrdma_ring_sq_db(qp);
1815
1816 /* update pointer, counter for next wr */
1817 ocrdma_hwq_inc_head(&qp->sq);
1818 wr = wr->next;
1819 }
1820 spin_unlock_irqrestore(&qp->q_lock, flags);
1821 return status;
1822}
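
/*
 * Illustration only, not driver code: a minimal sketch of how a kernel
 * ULP might reach ocrdma_post_send() above through the ib_post_send()
 * verb. The example_* name, the pre-mapped single SGE and the signaled
 * completion are assumptions made for the sketch.
 */
static inline int example_post_one_send(struct ib_qp *qp, u64 dma_addr,
					u32 len, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr   = dma_addr,	/* DMA address of a registered buffer */
		.length = len,
		.lkey   = lkey,		/* lkey of the MR covering the buffer */
	};
	struct ib_send_wr wr = {
		.wr_id      = wr_id,	/* echoed back in the completion */
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}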
1823
1824static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
1825{
Naresh Gottumukkaladf176ea2013-06-10 04:42:41 +00001826 u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
Parav Panditfe2caef2012-03-21 04:09:06 +05301827
1828 iowrite32(val, qp->rq_db);
1829}
1830
1831static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
1832 u16 tag)
1833{
1834 u32 wqe_size = 0;
1835 struct ocrdma_sge *sge;
1836 if (wr->num_sge)
1837 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
1838 else
1839 wqe_size = sizeof(*sge) + sizeof(*rqe);
1840
1841 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
1842 OCRDMA_WQE_SIZE_SHIFT);
1843 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
1844 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1845 rqe->total_len = 0;
1846 rqe->rsvd_tag = tag;
1847 sge = (struct ocrdma_sge *)(rqe + 1);
1848 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
1849 ocrdma_cpu_to_le32(rqe, wqe_size);
1850}
1851
1852int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1853 struct ib_recv_wr **bad_wr)
1854{
1855 int status = 0;
1856 unsigned long flags;
1857 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1858 struct ocrdma_hdr_wqe *rqe;
1859
1860 spin_lock_irqsave(&qp->q_lock, flags);
1861 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
1862 spin_unlock_irqrestore(&qp->q_lock, flags);
1863 *bad_wr = wr;
1864 return -EINVAL;
1865 }
1866 while (wr) {
1867 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
1868 wr->num_sge > qp->rq.max_sges) {
1869 *bad_wr = wr;
1870 status = -ENOMEM;
1871 break;
1872 }
1873 rqe = ocrdma_hwq_head(&qp->rq);
1874 ocrdma_build_rqe(rqe, wr, 0);
1875
1876 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
1877 /* make sure rqe is written before adapter can access it */
1878 wmb();
1879
1880 /* inform hw to start processing it */
1881 ocrdma_ring_rq_db(qp);
1882
1883 /* update pointer, counter for next wr */
1884 ocrdma_hwq_inc_head(&qp->rq);
1885 wr = wr->next;
1886 }
1887 spin_unlock_irqrestore(&qp->q_lock, flags);
1888 return status;
1889}
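
/*
 * Illustration only: the matching receive side. A ULP must keep the RQ
 * replenished with buffers; each ib_post_recv() call below lands in
 * ocrdma_post_recv() above. The example_* name and the single-SGE
 * layout are assumptions made for the sketch.
 */
static inline int example_post_one_recv(struct ib_qp *qp, u64 dma_addr,
					u32 len, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}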
1890
1891/* A cqe for an srq's rqe can arrive out of order. The index returned
1892 * here gives the entry in the shadow table where the wr_id is stored;
1893 * the same tag/index is returned in the cqe so that the rqe can be
1894 * referenced back later.
1895 */
1896static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
1897{
1898 int row = 0;
1899 int indx = 0;
1900
1901 for (row = 0; row < srq->bit_fields_len; row++) {
1902 if (srq->idx_bit_fields[row]) {
1903 indx = ffs(srq->idx_bit_fields[row]);
1904 indx = (row * 32) + (indx - 1);
1905			BUG_ON(indx >= srq->rq.max_cnt);
1907 ocrdma_srq_toggle_bit(srq, indx);
1908 break;
1909 }
1910 }
1911
1912	BUG_ON(row == srq->bit_fields_len);
1914 return indx;
1915}
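
/*
 * Worked example of the ffs() arithmetic above (illustration only): if
 * idx_bit_fields[1] == 0x00000008, ffs() returns 4 (bit positions are
 * numbered from 1), so indx = (1 * 32) + (4 - 1) = 35. Shadow-table
 * entry 35 is handed out and its bit toggled to mark it busy until a
 * cqe carrying tag 35 returns it.
 */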
1916
1917static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
1918{
1919 u32 val = srq->rq.dbid | (1 << 16);
1920
1921 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
1922}
1923
1924int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
1925 struct ib_recv_wr **bad_wr)
1926{
1927 int status = 0;
1928 unsigned long flags;
1929 struct ocrdma_srq *srq;
1930 struct ocrdma_hdr_wqe *rqe;
1931 u16 tag;
1932
1933 srq = get_ocrdma_srq(ibsrq);
1934
1935 spin_lock_irqsave(&srq->q_lock, flags);
1936 while (wr) {
1937 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
1938 wr->num_sge > srq->rq.max_sges) {
1939 status = -ENOMEM;
1940 *bad_wr = wr;
1941 break;
1942 }
1943 tag = ocrdma_srq_get_idx(srq);
1944 rqe = ocrdma_hwq_head(&srq->rq);
1945 ocrdma_build_rqe(rqe, wr, tag);
1946
1947 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
1948 /* make sure rqe is written before adapter can perform DMA */
1949 wmb();
1950 /* inform hw to start processing it */
1951 ocrdma_ring_srq_db(srq);
1952 /* update pointer, counter for next wr */
1953 ocrdma_hwq_inc_head(&srq->rq);
1954 wr = wr->next;
1955 }
1956 spin_unlock_irqrestore(&srq->q_lock, flags);
1957 return status;
1958}
1959
1960static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
1961{
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301962 enum ib_wc_status ibwc_status;
Parav Panditfe2caef2012-03-21 04:09:06 +05301963
1964 switch (status) {
1965 case OCRDMA_CQE_GENERAL_ERR:
1966 ibwc_status = IB_WC_GENERAL_ERR;
1967 break;
1968 case OCRDMA_CQE_LOC_LEN_ERR:
1969 ibwc_status = IB_WC_LOC_LEN_ERR;
1970 break;
1971 case OCRDMA_CQE_LOC_QP_OP_ERR:
1972 ibwc_status = IB_WC_LOC_QP_OP_ERR;
1973 break;
1974 case OCRDMA_CQE_LOC_EEC_OP_ERR:
1975 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
1976 break;
1977 case OCRDMA_CQE_LOC_PROT_ERR:
1978 ibwc_status = IB_WC_LOC_PROT_ERR;
1979 break;
1980 case OCRDMA_CQE_WR_FLUSH_ERR:
1981 ibwc_status = IB_WC_WR_FLUSH_ERR;
1982 break;
1983 case OCRDMA_CQE_MW_BIND_ERR:
1984 ibwc_status = IB_WC_MW_BIND_ERR;
1985 break;
1986 case OCRDMA_CQE_BAD_RESP_ERR:
1987 ibwc_status = IB_WC_BAD_RESP_ERR;
1988 break;
1989 case OCRDMA_CQE_LOC_ACCESS_ERR:
1990 ibwc_status = IB_WC_LOC_ACCESS_ERR;
1991 break;
1992 case OCRDMA_CQE_REM_INV_REQ_ERR:
1993 ibwc_status = IB_WC_REM_INV_REQ_ERR;
1994 break;
1995 case OCRDMA_CQE_REM_ACCESS_ERR:
1996 ibwc_status = IB_WC_REM_ACCESS_ERR;
1997 break;
1998 case OCRDMA_CQE_REM_OP_ERR:
1999 ibwc_status = IB_WC_REM_OP_ERR;
2000 break;
2001 case OCRDMA_CQE_RETRY_EXC_ERR:
2002 ibwc_status = IB_WC_RETRY_EXC_ERR;
2003 break;
2004 case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2005 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2006 break;
2007 case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2008 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2009 break;
2010 case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2011 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2012 break;
2013 case OCRDMA_CQE_REM_ABORT_ERR:
2014 ibwc_status = IB_WC_REM_ABORT_ERR;
2015 break;
2016 case OCRDMA_CQE_INV_EECN_ERR:
2017 ibwc_status = IB_WC_INV_EECN_ERR;
2018 break;
2019 case OCRDMA_CQE_INV_EEC_STATE_ERR:
2020 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2021 break;
2022 case OCRDMA_CQE_FATAL_ERR:
2023 ibwc_status = IB_WC_FATAL_ERR;
2024 break;
2025 case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2026 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2027 break;
2028 default:
2029 ibwc_status = IB_WC_GENERAL_ERR;
2030 break;
2031	}
2032 return ibwc_status;
2033}
2034
2035static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2036 u32 wqe_idx)
2037{
2038 struct ocrdma_hdr_wqe *hdr;
2039 struct ocrdma_sge *rw;
2040 int opcode;
2041
2042 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2043
2044 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2045 /* Undo the hdr->cw swap */
2046 opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2047 switch (opcode) {
2048 case OCRDMA_WRITE:
2049 ibwc->opcode = IB_WC_RDMA_WRITE;
2050 break;
2051 case OCRDMA_READ:
2052 rw = (struct ocrdma_sge *)(hdr + 1);
2053 ibwc->opcode = IB_WC_RDMA_READ;
2054 ibwc->byte_len = rw->len;
2055 break;
2056 case OCRDMA_SEND:
2057 ibwc->opcode = IB_WC_SEND;
2058 break;
2059 case OCRDMA_LKEY_INV:
2060 ibwc->opcode = IB_WC_LOCAL_INV;
2061 break;
2062 default:
2063 ibwc->status = IB_WC_GENERAL_ERR;
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002064 pr_err("%s() invalid opcode received = 0x%x\n",
2065 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
Parav Panditfe2caef2012-03-21 04:09:06 +05302066 break;
2067	}
2068}
2069
2070static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2071 struct ocrdma_cqe *cqe)
2072{
2073 if (is_cqe_for_sq(cqe)) {
2074 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2075 cqe->flags_status_srcqpn) &
2076 ~OCRDMA_CQE_STATUS_MASK);
2077 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2078 cqe->flags_status_srcqpn) |
2079 (OCRDMA_CQE_WR_FLUSH_ERR <<
2080 OCRDMA_CQE_STATUS_SHIFT));
2081 } else {
2082 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2083 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2084 cqe->flags_status_srcqpn) &
2085 ~OCRDMA_CQE_UD_STATUS_MASK);
2086 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2087 cqe->flags_status_srcqpn) |
2088 (OCRDMA_CQE_WR_FLUSH_ERR <<
2089 OCRDMA_CQE_UD_STATUS_SHIFT));
2090 } else {
2091 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2092 cqe->flags_status_srcqpn) &
2093 ~OCRDMA_CQE_STATUS_MASK);
2094 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2095 cqe->flags_status_srcqpn) |
2096 (OCRDMA_CQE_WR_FLUSH_ERR <<
2097 OCRDMA_CQE_STATUS_SHIFT));
2098 }
2099 }
2100}
2101
2102static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2103 struct ocrdma_qp *qp, int status)
2104{
2105 bool expand = false;
2106
2107 ibwc->byte_len = 0;
2108 ibwc->qp = &qp->ibqp;
2109 ibwc->status = ocrdma_to_ibwc_err(status);
2110
2111 ocrdma_flush_qp(qp);
2112 ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL);
2113
2114	/* if wqes/rqes are pending for which cqes need to be returned,
2115	 * trigger expanding this cqe into flush cqes for them.
2116	 */
2117 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2118 expand = true;
2119 ocrdma_set_cqe_status_flushed(qp, cqe);
2120 }
2121 return expand;
2122}
2123
2124static bool ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2125 struct ocrdma_qp *qp, int status)
2126{
2127 ibwc->opcode = IB_WC_RECV;
2128 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2129 ocrdma_hwq_inc_tail(&qp->rq);
2130
2131 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2132}
2133
2134static bool ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2135 struct ocrdma_qp *qp, int status)
2136{
2137 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2138 ocrdma_hwq_inc_tail(&qp->sq);
2139
2140 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2141}
2142
2144static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2145 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2146 bool *polled, bool *stop)
2147{
2148 bool expand;
2149 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2150 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2151
2152	/* when the hw sq is empty but the rq is not, keep the cqe
2153	 * so that we get the cq event again.
2154	 */
2155 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2156		/* when the rq and sq share the same cq, it is safe to
2157		 * return flush cqes for the rqes.
2158		 */
2159 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2160 *polled = true;
2161 status = OCRDMA_CQE_WR_FLUSH_ERR;
2162 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2163 } else {
2164			/* stop processing further cqes, as this cqe is used
2165			 * for triggering the cq event on the buddy cq of the
2166			 * RQ. When the QP is destroyed, this cqe will be
2167			 * removed from the cq's hardware q.
2168			 */
2169 *polled = false;
2170 *stop = true;
2171 expand = false;
2172 }
2173 } else {
2174 *polled = true;
2175 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2176 }
2177 return expand;
2178}
2179
2180static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2181 struct ocrdma_cqe *cqe,
2182 struct ib_wc *ibwc, bool *polled)
2183{
2184 bool expand = false;
2185 int tail = qp->sq.tail;
2186 u32 wqe_idx;
2187
2188 if (!qp->wqe_wr_id_tbl[tail].signaled) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302189 *polled = false; /* WC cannot be consumed yet */
2190 } else {
2191 ibwc->status = IB_WC_SUCCESS;
2192 ibwc->wc_flags = 0;
2193 ibwc->qp = &qp->ibqp;
2194 ocrdma_update_wc(qp, ibwc, tail);
2195 *polled = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302196 }
Parav Panditae3bca92012-08-17 14:45:33 +00002197 wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
2198 if (tail != wqe_idx)
2199 expand = true; /* Coalesced CQE can't be consumed yet */
2200
Parav Panditfe2caef2012-03-21 04:09:06 +05302201 ocrdma_hwq_inc_tail(&qp->sq);
2202 return expand;
2203}
2204
2205static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2206 struct ib_wc *ibwc, bool *polled, bool *stop)
2207{
2208 int status;
2209 bool expand;
2210
2211 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2212 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2213
2214 if (status == OCRDMA_CQE_SUCCESS)
2215 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2216 else
2217 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2218 return expand;
2219}
2220
2221static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2222{
2223 int status;
2224
2225 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2226 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2227 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2228 OCRDMA_CQE_SRCQP_MASK;
2229 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2230 OCRDMA_CQE_PKEY_MASK;
2231 ibwc->wc_flags = IB_WC_GRH;
2232 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2233 OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2234 return status;
2235}
2236
2237static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2238 struct ocrdma_cqe *cqe,
2239 struct ocrdma_qp *qp)
2240{
2241 unsigned long flags;
2242 struct ocrdma_srq *srq;
2243 u32 wqe_idx;
2244
2245 srq = get_ocrdma_srq(qp->ibqp.srq);
2246 wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT;
2247 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2248 spin_lock_irqsave(&srq->q_lock, flags);
2249 ocrdma_srq_toggle_bit(srq, wqe_idx);
2250 spin_unlock_irqrestore(&srq->q_lock, flags);
2251 ocrdma_hwq_inc_tail(&srq->rq);
2252}
2253
2254static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2255 struct ib_wc *ibwc, bool *polled, bool *stop,
2256 int status)
2257{
2258 bool expand;
2259
2260	/* when the hw rq is empty but the sq is not, keep the cqe
2261	 * so that we get the cq event again.
2262	 */
2263 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2264 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2265 *polled = true;
2266 status = OCRDMA_CQE_WR_FLUSH_ERR;
2267 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2268 } else {
2269 *polled = false;
2270 *stop = true;
2271 expand = false;
2272 }
Parav Pandita3698a92012-06-11 16:39:20 +05302273 } else {
2274 *polled = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302275 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
Parav Pandita3698a92012-06-11 16:39:20 +05302276 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302277 return expand;
2278}
2279
2280static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2281 struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2282{
2283 ibwc->opcode = IB_WC_RECV;
2284 ibwc->qp = &qp->ibqp;
2285 ibwc->status = IB_WC_SUCCESS;
2286
2287 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2288 ocrdma_update_ud_rcqe(ibwc, cqe);
2289 else
2290 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2291
2292 if (is_cqe_imm(cqe)) {
2293 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2294 ibwc->wc_flags |= IB_WC_WITH_IMM;
2295 } else if (is_cqe_wr_imm(cqe)) {
2296 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2297 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2298 ibwc->wc_flags |= IB_WC_WITH_IMM;
2299 } else if (is_cqe_invalidated(cqe)) {
2300 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2301 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2302 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302303 if (qp->ibqp.srq) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302304 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302305 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302306 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2307 ocrdma_hwq_inc_tail(&qp->rq);
2308 }
2309}
2310
2311static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2312 struct ib_wc *ibwc, bool *polled, bool *stop)
2313{
2314 int status;
2315 bool expand = false;
2316
2317 ibwc->wc_flags = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302318 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302319 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2320 OCRDMA_CQE_UD_STATUS_MASK) >>
2321 OCRDMA_CQE_UD_STATUS_SHIFT;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302322 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302323 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2324 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302325 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302326
2327 if (status == OCRDMA_CQE_SUCCESS) {
2328 *polled = true;
2329 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2330 } else {
2331 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2332 status);
2333 }
2334 return expand;
2335}
2336
2337static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2338 u16 cur_getp)
2339{
2340 if (cq->phase_change) {
2341 if (cur_getp == 0)
2342 cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302343 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302344 /* clear valid bit */
2345 cqe->flags_status_srcqpn = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302346 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302347}
2348
2349static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2350 struct ib_wc *ibwc)
2351{
2352 u16 qpn = 0;
2353 int i = 0;
2354 bool expand = false;
2355 int polled_hw_cqes = 0;
2356 struct ocrdma_qp *qp = NULL;
2357 struct ocrdma_dev *dev = cq->dev;
2358 struct ocrdma_cqe *cqe;
2359	u16 cur_getp;
	bool polled = false;
	bool stop = false;
2360
2361 cur_getp = cq->getp;
2362 while (num_entries) {
2363 cqe = cq->va + cur_getp;
2364 /* check whether valid cqe or not */
2365 if (!is_cqe_valid(cq, cqe))
2366 break;
2367 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2368 /* ignore discarded cqe */
2369 if (qpn == 0)
2370 goto skip_cqe;
2371 qp = dev->qp_tbl[qpn];
2372 BUG_ON(qp == NULL);
2373
2374 if (is_cqe_for_sq(cqe)) {
2375 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2376 &stop);
2377 } else {
2378 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2379 &stop);
2380 }
2381 if (expand)
2382 goto expand_cqe;
2383 if (stop)
2384 goto stop_cqe;
2385 /* clear qpn to avoid duplicate processing by discard_cqe() */
2386 cqe->cmn.qpn = 0;
2387skip_cqe:
2388 polled_hw_cqes += 1;
2389 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2390 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2391expand_cqe:
2392 if (polled) {
2393 num_entries -= 1;
2394 i += 1;
2395 ibwc = ibwc + 1;
2396 polled = false;
2397 }
2398 }
2399stop_cqe:
2400 cq->getp = cur_getp;
2401 if (polled_hw_cqes || expand || stop) {
2402 ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
2403 polled_hw_cqes);
2404 }
2405 return i;
2406}
2407
2408/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2409static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2410 struct ocrdma_qp *qp, struct ib_wc *ibwc)
2411{
2412 int err_cqes = 0;
2413
2414 while (num_entries) {
2415 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2416 break;
2417 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2418 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2419 ocrdma_hwq_inc_tail(&qp->sq);
2420 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2421 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2422 ocrdma_hwq_inc_tail(&qp->rq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302423 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302424 return err_cqes;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302425 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302426 ibwc->byte_len = 0;
2427 ibwc->status = IB_WC_WR_FLUSH_ERR;
2428 ibwc = ibwc + 1;
2429 err_cqes += 1;
2430 num_entries -= 1;
2431 }
2432 return err_cqes;
2433}
2434
2435int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2436{
2437 int cqes_to_poll = num_entries;
2438 struct ocrdma_cq *cq = NULL;
2439 unsigned long flags;
2440 struct ocrdma_dev *dev;
2441 int num_os_cqe = 0, err_cqes = 0;
2442 struct ocrdma_qp *qp;
2443
2444 cq = get_ocrdma_cq(ibcq);
2445 dev = cq->dev;
2446
2447 /* poll cqes from adapter CQ */
2448 spin_lock_irqsave(&cq->cq_lock, flags);
2449 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2450 spin_unlock_irqrestore(&cq->cq_lock, flags);
2451 cqes_to_poll -= num_os_cqe;
2452
2453 if (cqes_to_poll) {
2454 wc = wc + num_os_cqe;
2455		/* the adapter returns a single error cqe when the qp moves
2456		 * to the error state. So insert error cqes, with wc_status
2457		 * set to FLUSHED, for the pending WQEs and RQEs of any QP
2458		 * whose SQ or RQ uses this CQ.
2459		 */
2460 spin_lock_irqsave(&dev->flush_q_lock, flags);
2461 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2462 if (cqes_to_poll == 0)
2463 break;
2464 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2465 cqes_to_poll -= err_cqes;
2466 num_os_cqe += err_cqes;
2467 wc = wc + err_cqes;
2468 }
2469 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2470 }
2471 return num_os_cqe;
2472}
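
/*
 * Illustration only: the typical consumer of this entry point.
 * ib_poll_cq() dispatches to ocrdma_poll_cq() above; after a QP moves to
 * the error state, the flush cqes synthesized there surface with status
 * IB_WC_WR_FLUSH_ERR. The batch size and the example_* name are
 * assumptions made for the sketch.
 */
static inline void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int i, n;

	while ((n = ib_poll_cq(cq, 16, wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status != IB_WC_SUCCESS)
				pr_debug("wr_id 0x%llx flushed/failed, status %d\n",
					 (unsigned long long)wc[i].wr_id,
					 wc[i].status);
			/* else reclaim the buffer tied to wc[i].wr_id */
		}
	}
}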
2473
2474int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2475{
2476 struct ocrdma_cq *cq;
2477 unsigned long flags;
2478 struct ocrdma_dev *dev;
2479 u16 cq_id;
2480 u16 cur_getp;
2481 struct ocrdma_cqe *cqe;
2482
2483 cq = get_ocrdma_cq(ibcq);
2484 cq_id = cq->id;
2485 dev = cq->dev;
2486
2487 spin_lock_irqsave(&cq->cq_lock, flags);
2488	if (cq_flags & (IB_CQ_NEXT_COMP | IB_CQ_SOLICITED))
2489 cq->armed = true;
2490 if (cq_flags & IB_CQ_SOLICITED)
2491 cq->solicited = true;
2492
2493 cur_getp = cq->getp;
2494 cqe = cq->va + cur_getp;
2495
2496	/* check whether any valid cqe exists; if not, it is safe to arm.
2497	 * If a cqe is not yet consumed, let it get consumed first and only
2498	 * then arm, to avoid false interrupts.
2499	 */
2500 if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
2501 cq->arm_needed = false;
2502 ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
2503 }
2504 spin_unlock_irqrestore(&cq->cq_lock, flags);
2505 return 0;
2506}
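
/*
 * Illustration only: the canonical notify-then-repoll pattern around
 * ocrdma_arm_cq() (reached via ib_req_notify_cq()). Polling once more
 * after arming closes the race described above, where a cqe lands
 * between the consumer's last poll and the arm. The example_* name is
 * an assumption made for the sketch.
 */
static inline void example_rearm_and_repoll(struct ib_cq *cq)
{
	struct ib_wc wc;

	/* request an interrupt for the next completion */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	/* pick up any cqe that arrived before the arm took effect */
	while (ib_poll_cq(cq, 1, &wc) > 0)
		; /* process wc as in example_drain_cq() above */
}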