/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"

int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
		     int index, union ib_gid *sgid)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	memset(sgid, 0, sizeof(*sgid));
	if (index >= OCRDMA_MAX_SGID)
		return -EINVAL;

	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

	return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	memset(attr, 0, sizeof *attr);
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = ~0ull;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = 0;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = dev->attr.max_qp;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
					IB_DEVICE_RC_RNR_NAK_GEN |
					IB_DEVICE_SHUTDOWN_PORT |
					IB_DEVICE_SYS_IMAGE_GUID |
					IB_DEVICE_LOCAL_DMA_LKEY;
	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
	attr->max_sge_rd = dev->attr.max_rdma_sge;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = 0;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = 0;
	attr->max_pkeys = 1;
	return 0;
}

int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
		       dev->id, port);
		return -EINVAL;
	}
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	props->active_width = IB_WIDTH_1X;
	props->active_speed = 4;
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
		return -EINVAL;
	}
	return 0;
}

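/*
 * Remember a physical address range that this ucontext is allowed to
 * mmap(); ocrdma_mmap() later validates requested offsets against this
 * per-context list.
 */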
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return ERR_PTR(-EFAULT);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	memset(ctx->ah_tbl.va, 0, map_len);
	ctx->ah_tbl.len = map_len;

	memset(&resp, 0, sizeof(resp));
	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = ctx->ah_tbl.pa;

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;
	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return &ctx->ibucontext;

cpy_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	kfree(ctx);
	return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return 0;
}

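/*
 * Map a doorbell or DPP page into user space. The requested offset must
 * match an address range previously registered via ocrdma_add_mmap();
 * doorbell pages are mapped non-cached, DPP pages write-combined, and
 * anything else (e.g. queue memory) with the default protection.
 */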
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status = 0;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
		(len <= dev->nic_info.db_page_size)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
			dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len, vma->vm_page_prot);
	}
	return status;
}

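/*
 * Report the new PD's id, its doorbell page and (if allocated) its DPP
 * page to the user library, and register those pages as mmap()-able for
 * this ucontext.
 */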
static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	if (pd->dpp_enabled)
		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
	return status;
}

struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	int status;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);
	if (udata && context) {
		pd->dpp_enabled =
			(dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY);
		pd->num_dpp_qp =
			pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
	}
retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		/* retry the PD allocation without DPP */
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		} else {
			kfree(pd);
			return ERR_PTR(status);
		}
	}

	if (udata && context) {
		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	status = ocrdma_mbx_dealloc_pd(dev, pd);
	kfree(pd);
	return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	int status;
	u64 usr_db;

	status = ocrdma_mbx_dealloc_pd(dev, pd);
	if (pd->uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
	}
	kfree(pd);
	return status;
}

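/*
 * Translate the IB access flags into the hardware MR descriptor and ask
 * firmware to allocate an lkey for it via the mailbox command.
 */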
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}

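/*
 * Pick the smallest PBL (page buffer list) size, starting from the
 * minimum hardware page size, for which the number of PBLs needed to
 * hold num_pbes entries stays within the device limit.
 */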
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
				mr->num_pbls, GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		memset(va, 0, dma_len);
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

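/*
 * Walk the umem chunk list of a user MR and fill the PBLs with the DMA
 * address of every page, moving on to the next PBL whenever the current
 * one is full.
 */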
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct ib_umem_chunk *chunk;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	list_for_each_entry(chunk, &umem->chunk_list, list) {
		/* get all the dma regions from the chunk. */
		for (i = 0; i < chunk->nmap; i++) {
			pages = sg_dma_len(&chunk->page_list[i]) >> shift;
			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
				/* store the page address in pbe */
				pbe->pa_lo =
				    cpu_to_le32(sg_dma_address
						(&chunk->page_list[i]) +
						(umem->page_size * pg_cnt));
				pbe->pa_hi =
				    cpu_to_le32(upper_32_bits
						((sg_dma_address
						  (&chunk->page_list[i]) +
						  umem->page_size * pg_cnt)));
				pbe_cnt += 1;
				total_num_pbes += 1;
				pbe++;

				/* if done building pbes, issue the mbx cmd. */
				if (total_num_pbes == num_pbes)
					return;

				/* if the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
					(mr->hwmr.pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct ocrdma_pbe *)pbl_tbl->va;
					pbe_cnt = 0;
				}
			}
		}
	}
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = mr->umem->page_size;
	mr->hwmr.fbo = mr->umem->offset;
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
	int status;

	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	if (mr->hwmr.fr_mr == 0)
		ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return status;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx;
	struct ocrdma_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = cq->pa;
	uresp.db_page_addr = dev->nic_info.unmapped_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	uctx = get_ocrdma_ucontext(ib_ctx);
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else
		ureq.dpp_cq = 0;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	cq->arm_needed = true;
	dev->cq_tbl[cq->id] = cq;

	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	int status;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);

	status = ocrdma_mbx_destroy_cq(dev, cq);

	if (cq->ucontext) {
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
				dev->nic_info.db_page_size);
	}
	dev->cq_tbl[cq->id] = NULL;

	kfree(cq);
	return status;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

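/*
 * Validate the requested QP type and capabilities against what the
 * device advertises before issuing the create-QP mailbox command.
 */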
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		pr_err
		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		     __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

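/*
 * Fill the create-QP response for the user library: queue page addresses
 * and sizes, doorbell page and offsets (which differ on the GEN2 family)
 * and any DPP credits, then register the queue pages for mmap().
 */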
static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status = 0;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = qp->sq.pa;
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = qp->rq.pa;
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
		uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
			OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET;
		uresp.db_shift = (qp->id < 128) ? 24 : 16;
	} else {
		uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			((qp->id < 128) ?
			 OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET);
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	qp->dev = dev;
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
					OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
					ureq.dpp_cq_id,
					&dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user space QP's wr_id table is managed by the library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

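/*
 * Write out any RQ doorbell rings that were cached in db_cache; called
 * once the QP transitions to RTR.
 */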
static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
{
	if (qp->db_cache) {
		u32 val = qp->rq.dbid | (qp->db_cache <<
				ocrdma_get_num_posted_shift(qp));
		iowrite32(val, qp->rq_db);
		qp->db_cache = 0;
	}
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if new and previous states are same hw doesn't need to
	 * know about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
	if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
		ocrdma_flush_rq_db(qp);

	return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	/* synchronize with multiple contexts trying to change or retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = qp->dev;

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
	qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
	qp_attr->path_mtu =
		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
	       sizeof(params.dgid));
	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
					  OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
					      OCRDMA_QP_PARAMS_TCLASS_SHIFT;

	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	qp_attr->ah_attr.port_num = 1;
	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
			       OCRDMA_QP_PARAMS_SL_MASK) >>
			       OCRDMA_QP_PARAMS_SL_SHIFT;
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
mbx_err:
	return status;
}

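/*
 * Flip the free/used bit for a given SRQ RQE index in the SRQ's index
 * bitmap.
 */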
1294static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
1295{
1296 int i = idx / 32;
1297 unsigned int mask = (1 << (idx % 32));
1298
1299 if (srq->idx_bit_fields[i] & mask)
1300 srq->idx_bit_fields[i] &= ~mask;
1301 else
1302 srq->idx_bit_fields[i] |= mask;
1303}
1304
1305static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1306{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301307 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
Parav Panditfe2caef2012-03-21 04:09:06 +05301308}
1309
1310static int is_hw_sq_empty(struct ocrdma_qp *qp)
1311{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301312 return (qp->sq.tail == qp->sq.head);
Parav Panditfe2caef2012-03-21 04:09:06 +05301313}
1314
1315static int is_hw_rq_empty(struct ocrdma_qp *qp)
1316{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301317 return (qp->rq.tail == qp->rq.head);
Parav Panditfe2caef2012-03-21 04:09:06 +05301318}
1319
1320static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1321{
1322 return q->va + (q->head * q->entry_size);
1323}
1324
1325static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1326 u32 idx)
1327{
1328 return q->va + (idx * q->entry_size);
1329}
1330
1331static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1332{
1333 q->head = (q->head + 1) & q->max_wqe_idx;
1334}
1335
1336static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1337{
1338 q->tail = (q->tail + 1) & q->max_wqe_idx;
1339}
1340
1341/* discard the cqe for a given QP */
1342static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1343{
1344 unsigned long cq_flags;
1345 unsigned long flags;
1346 int discard_cnt = 0;
1347 u32 cur_getp, stop_getp;
1348 struct ocrdma_cqe *cqe;
1349 u32 qpn = 0;
1350
1351 spin_lock_irqsave(&cq->cq_lock, cq_flags);
1352
1353 /* traverse through the CQEs in the hw CQ,
1354 * find the matching CQE for a given qp,
1355 * mark the matching one discarded by clearing qpn.
1356 * ring the doorbell in the poll_cq() as
1357 * we don't complete out of order cqe.
1358 */
1359
1360 cur_getp = cq->getp;
1361 /* find upto when do we reap the cq. */
1362 stop_getp = cur_getp;
1363 do {
1364 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1365 break;
1366
1367 cqe = cq->va + cur_getp;
1368 /* if (a) done reaping whole hw cq, or
1369 * (b) qp_xq becomes empty.
1370 * then exit
1371 */
1372 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1373 /* if previously discarded cqe found, skip that too. */
1374 /* check for matching qp */
1375 if (qpn == 0 || qpn != qp->id)
1376 goto skip_cqe;
1377
1378 /* mark cqe discarded so that it is not picked up later
1379 * in the poll_cq().
1380 */
1381 discard_cnt += 1;
1382 cqe->cmn.qpn = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301383 if (is_cqe_for_sq(cqe)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301384 ocrdma_hwq_inc_tail(&qp->sq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301385 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301386 if (qp->srq) {
1387 spin_lock_irqsave(&qp->srq->q_lock, flags);
1388 ocrdma_hwq_inc_tail(&qp->srq->rq);
1389 ocrdma_srq_toggle_bit(qp->srq, cur_getp);
1390 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1391
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301392 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301393 ocrdma_hwq_inc_tail(&qp->rq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301394 }
Parav Panditfe2caef2012-03-21 04:09:06 +05301395 }
1396skip_cqe:
1397 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1398 } while (cur_getp != stop_getp);
1399 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1400}
1401
1402static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1403{
1404 int found = false;
1405 unsigned long flags;
1406 struct ocrdma_dev *dev = qp->dev;
1407 /* sync with any active CQ poll */
1408
1409 spin_lock_irqsave(&dev->flush_q_lock, flags);
1410 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1411 if (found)
1412 list_del(&qp->sq_entry);
1413 if (!qp->srq) {
1414 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1415 if (found)
1416 list_del(&qp->rq_entry);
1417 }
1418 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1419}
1420
1421int ocrdma_destroy_qp(struct ib_qp *ibqp)
1422{
1423 int status;
1424 struct ocrdma_pd *pd;
1425 struct ocrdma_qp *qp;
1426 struct ocrdma_dev *dev;
1427 struct ib_qp_attr attrs;
1428 int attr_mask = IB_QP_STATE;
Dan Carpenterd19081e2012-05-02 09:14:47 +03001429 unsigned long flags;
Parav Panditfe2caef2012-03-21 04:09:06 +05301430
1431 qp = get_ocrdma_qp(ibqp);
1432 dev = qp->dev;
1433
1434 attrs.qp_state = IB_QPS_ERR;
1435 pd = qp->pd;
1436
1437 /* change the QP state to ERROR */
1438 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1439
1440 /* ensure that CQEs for newly created QP (whose id may be same with
1441 * one which just getting destroyed are same), dont get
1442 * discarded until the old CQEs are discarded.
1443 */
1444 mutex_lock(&dev->dev_lock);
1445 status = ocrdma_mbx_destroy_qp(dev, qp);
1446
1447 /*
1448 * acquire CQ lock while destroy is in progress, in order to
1449 * protect against proessing in-flight CQEs for this QP.
1450 */
Dan Carpenterd19081e2012-05-02 09:14:47 +03001451 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05301452 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
Dan Carpenterd19081e2012-05-02 09:14:47 +03001453 spin_lock(&qp->rq_cq->cq_lock);
Parav Panditfe2caef2012-03-21 04:09:06 +05301454
1455 ocrdma_del_qpn_map(dev, qp);
1456
1457 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
Dan Carpenterd19081e2012-05-02 09:14:47 +03001458 spin_unlock(&qp->rq_cq->cq_lock);
1459 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05301460
1461 if (!pd->uctx) {
1462 ocrdma_discard_cqes(qp, qp->sq_cq);
1463 ocrdma_discard_cqes(qp, qp->rq_cq);
1464 }
1465 mutex_unlock(&dev->dev_lock);
1466
1467 if (pd->uctx) {
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301468 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1469 PAGE_ALIGN(qp->sq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301470 if (!qp->srq)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301471 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1472 PAGE_ALIGN(qp->rq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301473 }
1474
1475 ocrdma_del_flush_qp(qp);
1476
Parav Panditfe2caef2012-03-21 04:09:06 +05301477 kfree(qp->wqe_wr_id_tbl);
1478 kfree(qp->rqe_wr_id_tbl);
1479 kfree(qp);
1480 return status;
1481}
1482
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301483static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1484 struct ib_udata *udata)
Parav Panditfe2caef2012-03-21 04:09:06 +05301485{
1486 int status;
1487 struct ocrdma_create_srq_uresp uresp;
1488
Dan Carpenter63ea3742013-07-29 22:34:29 +03001489 memset(&uresp, 0, sizeof(uresp));
Parav Panditfe2caef2012-03-21 04:09:06 +05301490 uresp.rq_dbid = srq->rq.dbid;
1491 uresp.num_rq_pages = 1;
1492 uresp.rq_page_addr[0] = srq->rq.pa;
1493 uresp.rq_page_size = srq->rq.len;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301494 uresp.db_page_addr = dev->nic_info.unmapped_db +
1495 (srq->pd->id * dev->nic_info.db_page_size);
1496 uresp.db_page_size = dev->nic_info.db_page_size;
Parav Panditfe2caef2012-03-21 04:09:06 +05301497 uresp.num_rqe_allocated = srq->rq.max_cnt;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301498 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301499 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
1500 uresp.db_shift = 24;
1501 } else {
1502 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1503 uresp.db_shift = 16;
1504 }
1505
1506 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1507 if (status)
1508 return status;
1509 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1510 uresp.rq_page_size);
1511 if (status)
1512 return status;
1513 return status;
1514}
1515
1516struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1517 struct ib_srq_init_attr *init_attr,
1518 struct ib_udata *udata)
1519{
1520 int status = -ENOMEM;
1521 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301522 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301523 struct ocrdma_srq *srq;
1524
1525 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1526 return ERR_PTR(-EINVAL);
1527 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1528 return ERR_PTR(-EINVAL);
1529
1530 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1531 if (!srq)
1532 return ERR_PTR(status);
1533
1534 spin_lock_init(&srq->q_lock);
Parav Panditfe2caef2012-03-21 04:09:06 +05301535 srq->pd = pd;
1536 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301537 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
Parav Panditfe2caef2012-03-21 04:09:06 +05301538 if (status)
1539 goto err;
1540
1541 if (udata == NULL) {
1542 srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1543 GFP_KERNEL);
1544 if (srq->rqe_wr_id_tbl == NULL)
1545 goto arm_err;
1546
1547 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1548 (srq->rq.max_cnt % 32 ? 1 : 0);
1549 srq->idx_bit_fields =
1550 kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
1551 if (srq->idx_bit_fields == NULL)
1552 goto arm_err;
1553 memset(srq->idx_bit_fields, 0xff,
1554 srq->bit_fields_len * sizeof(u32));
1555 }
1556
1557 if (init_attr->attr.srq_limit) {
1558 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1559 if (status)
1560 goto arm_err;
1561 }
1562
Parav Panditfe2caef2012-03-21 04:09:06 +05301563 if (udata) {
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301564 status = ocrdma_copy_srq_uresp(dev, srq, udata);
Parav Panditfe2caef2012-03-21 04:09:06 +05301565 if (status)
1566 goto arm_err;
1567 }
1568
Parav Panditfe2caef2012-03-21 04:09:06 +05301569 return &srq->ibsrq;
1570
1571arm_err:
1572 ocrdma_mbx_destroy_srq(dev, srq);
1573err:
1574 kfree(srq->rqe_wr_id_tbl);
1575 kfree(srq->idx_bit_fields);
1576 kfree(srq);
1577 return ERR_PTR(status);
1578}
1579
1580int ocrdma_modify_srq(struct ib_srq *ibsrq,
1581 struct ib_srq_attr *srq_attr,
1582 enum ib_srq_attr_mask srq_attr_mask,
1583 struct ib_udata *udata)
1584{
1585 int status = 0;
1586 struct ocrdma_srq *srq;
Parav Panditfe2caef2012-03-21 04:09:06 +05301587
1588 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301589 if (srq_attr_mask & IB_SRQ_MAX_WR)
1590 status = -EINVAL;
1591 else
1592 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1593 return status;
1594}
1595
1596int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1597{
1598 int status;
1599 struct ocrdma_srq *srq;
1600
1601 srq = get_ocrdma_srq(ibsrq);
1602 status = ocrdma_mbx_query_srq(srq, srq_attr);
1603 return status;
1604}
1605
1606int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1607{
1608 int status;
1609 struct ocrdma_srq *srq;
1610 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1611
1612 srq = get_ocrdma_srq(ibsrq);
1613
1614 status = ocrdma_mbx_destroy_srq(dev, srq);
1615
1616 if (srq->pd->uctx)
1617 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1618 PAGE_ALIGN(srq->rq.len));
1619
1620 kfree(srq->idx_bit_fields);
1621 kfree(srq->rqe_wr_id_tbl);
1622 kfree(srq);
1623 return status;
1624}
1625
1626/* unprivileged verbs and their support functions. */
1627static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1628 struct ocrdma_hdr_wqe *hdr,
1629 struct ib_send_wr *wr)
1630{
1631 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1632 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1633 struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1634
1635 ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1636 if (qp->qp_type == IB_QPT_GSI)
1637 ud_hdr->qkey = qp->qkey;
1638 else
1639 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1640 ud_hdr->rsvd_ahid = ah->id;
1641}
1642
1643static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1644 struct ocrdma_sge *sge, int num_sge,
1645 struct ib_sge *sg_list)
1646{
1647 int i;
1648
1649 for (i = 0; i < num_sge; i++) {
1650 sge[i].lrkey = sg_list[i].lkey;
1651 sge[i].addr_lo = sg_list[i].addr;
1652 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1653 sge[i].len = sg_list[i].length;
1654 hdr->total_len += sg_list[i].length;
1655 }
1656 if (num_sge == 0)
1657 memset(sge, 0, sizeof(*sge));
1658}
1659
1660static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1661 struct ocrdma_hdr_wqe *hdr,
1662 struct ocrdma_sge *sge,
1663 struct ib_send_wr *wr, u32 wqe_size)
1664{
1665 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1666 if (wr->sg_list[0].length > qp->max_inline_data) {
1667 pr_err("%s() supported_len=0x%x,\n"
1668 " unsupported len req=0x%x\n", __func__,
1669 qp->max_inline_data, wr->sg_list[0].length);
1670 return -EINVAL;
1671 }
1672 memcpy(sge,
1673 (void *)(unsigned long)wr->sg_list[0].addr,
1674 wr->sg_list[0].length);
1675 hdr->total_len = wr->sg_list[0].length;
1676 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
1677 if (wr->sg_list[0].length == 0)
1678 wqe_size += sizeof(struct ocrdma_sge);
1679 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1680 } else {
1681 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1682 if (wr->num_sge)
1683 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1684 else
1685 wqe_size += sizeof(struct ocrdma_sge);
1686 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1687 }
1688 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1689 return 0;
1690}
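/*
 * Editor's note (illustrative, not part of the original source): the WQE
 * size is encoded into hdr->cw in units of OCRDMA_WQE_STRIDE. For example,
 * a non-inline send with two SGEs gives
 *	wqe_size = sizeof(*hdr) + 2 * sizeof(struct ocrdma_sge),
 * and (wqe_size / OCRDMA_WQE_STRIDE) is shifted into place with
 * OCRDMA_WQE_SIZE_SHIFT; an inline payload is instead rounded up to
 * OCRDMA_WQE_ALIGN_BYTES before being added to wqe_size.
 */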
1691
1692static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1693 struct ib_send_wr *wr)
1694{
1695 int status;
1696 struct ocrdma_sge *sge;
1697 u32 wqe_size = sizeof(*hdr);
1698
1699 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
1700 ocrdma_build_ud_hdr(qp, hdr, wr);
1701 sge = (struct ocrdma_sge *)(hdr + 2);
1702 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
1703 } else {
1704 sge = (struct ocrdma_sge *)(hdr + 1);
1705 }
1706
1707 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1708 return status;
1709}
1710
1711static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1712 struct ib_send_wr *wr)
1713{
1714 int status;
1715 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1716 struct ocrdma_sge *sge = ext_rw + 1;
1717 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
1718
1719 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1720 if (status)
1721 return status;
1722 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1723 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1724 ext_rw->lrkey = wr->wr.rdma.rkey;
1725 ext_rw->len = hdr->total_len;
1726 return 0;
1727}
1728
1729static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1730 struct ib_send_wr *wr)
1731{
1732 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1733 struct ocrdma_sge *sge = ext_rw + 1;
1734 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
1735 sizeof(struct ocrdma_hdr_wqe);
1736
1737 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1738 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1739 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
1740 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1741
1742 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1743 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1744 ext_rw->lrkey = wr->wr.rdma.rkey;
1745 ext_rw->len = hdr->total_len;
1746}
1747
1748 static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
1749 struct ocrdma_hw_mr *hwmr)
1750{
1751 int i;
1752 u64 buf_addr = 0;
1753 int num_pbes;
1754 struct ocrdma_pbe *pbe;
1755
1756 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1757 num_pbes = 0;
1758
1759 /* go through the OS phy regions & fill hw pbe entries into pbls. */
1760 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
1761 /* one OS buffer may need more than one PBE when the
1762 * buffers are of different sizes; split the ib_buf
1763 * into one or more PBEs.
1764 */
1765 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
1766 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
1767 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
1768 num_pbes += 1;
1769 pbe++;
1770
1771 /* if the current PBL is full of PBEs,
1772 * move on to the next PBL.
1773 */
1774 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
1775 pbl_tbl++;
1776 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1777 }
1778 }
1779 return;
1780}
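/*
 * Editor's note (illustrative sketch, assuming each PBE is 8 bytes wide,
 * i.e. pa_lo + pa_hi): a PBL of hwmr->pbl_size bytes holds
 * hwmr->pbl_size / sizeof(u64) PBEs, which is the point at which the loop
 * above advances pbl_tbl to the next PBL. With a 4KB PBL that would be
 * 512 page addresses per PBL.
 */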
1781
1782static int get_encoded_page_size(int pg_sz)
1783{
1784 /* Max encoded page size is 256MB (4096 << 16) */
1785 int i = 0;
1786 for (; i < 17; i++)
1787 if (pg_sz == (4096 << i))
1788 break;
1789 return i;
1790}
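/*
 * Editor's note (worked example): the encoding is log2(pg_sz / 4096),
 * so 4096 -> 0, 8192 -> 1, 65536 -> 4 and 256MB (4096 << 16) -> 16;
 * a pg_sz that matches none of these falls out of the loop as 17.
 */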
1791
1792
1793static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1794 struct ib_send_wr *wr)
1795{
1796 u64 fbo;
1797 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
1798 struct ocrdma_mr *mr;
1799 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
1800
1801 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
1802
1803 if ((wr->wr.fast_reg.page_list_len >
1804 qp->dev->attr.max_pages_per_frmr) ||
1805 (wr->wr.fast_reg.length > 0xffffffffULL))
1806 return -EINVAL;
1807
1808 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
1809 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1810
1811 BUG_ON(wr->wr.fast_reg.page_list_len == 0);
1813 if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
1814 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
1815 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
1816 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
1817 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
1818 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
1819 hdr->lkey = wr->wr.fast_reg.rkey;
1820 hdr->total_len = wr->wr.fast_reg.length;
1821
1822 fbo = wr->wr.fast_reg.iova_start -
1823 (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
1824
1825 fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
1826 fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
1827 fast_reg->fbo_hi = upper_32_bits(fbo);
1828 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
1829 fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
1830 fast_reg->size_sge =
1831 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
1832 mr = (struct ocrdma_mr *)qp->dev->stag_arr[(hdr->lkey >> 8) &
1833 (OCRDMA_MAX_STAG - 1)];
1834 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
1835 return 0;
1836}
1837
1838 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
1839{
1840 u32 val = qp->sq.dbid | (1 << 16);
1841
1842 iowrite32(val, qp->sq_db);
1843}
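/*
 * Editor's note (hedged interpretation): the doorbell word appears to carry
 * the queue id in its low bits and the number of newly posted WQEs in the
 * upper bits, so "qp->sq.dbid | (1 << 16)" tells the adapter that exactly
 * one more SQ WQE is ready; ocrdma_post_send() rings this once per WQE.
 */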
1844
1845int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1846 struct ib_send_wr **bad_wr)
1847{
1848 int status = 0;
1849 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1850 struct ocrdma_hdr_wqe *hdr;
1851 unsigned long flags;
1852
1853 spin_lock_irqsave(&qp->q_lock, flags);
1854 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
1855 spin_unlock_irqrestore(&qp->q_lock, flags);
1856 *bad_wr = wr;
1857 return -EINVAL;
1858 }
1859
1860 while (wr) {
1861 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
1862 wr->num_sge > qp->sq.max_sges) {
1863 *bad_wr = wr;
1864 status = -ENOMEM;
1865 break;
1866 }
1867 hdr = ocrdma_hwq_head(&qp->sq);
1868 hdr->cw = 0;
1869 if (wr->send_flags & IB_SEND_SIGNALED)
1870 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
1871 if (wr->send_flags & IB_SEND_FENCE)
1872 hdr->cw |=
1873 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
1874 if (wr->send_flags & IB_SEND_SOLICITED)
1875 hdr->cw |=
1876 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
1877 hdr->total_len = 0;
1878 switch (wr->opcode) {
1879 case IB_WR_SEND_WITH_IMM:
1880 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
1881 hdr->immdt = ntohl(wr->ex.imm_data);
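	/* fall through to IB_WR_SEND */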
1882 case IB_WR_SEND:
1883 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
1884 status = ocrdma_build_send(qp, hdr, wr);
1885 break;
1886 case IB_WR_SEND_WITH_INV:
1887 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
1888 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
1889 hdr->lkey = wr->ex.invalidate_rkey;
1890 status = ocrdma_build_send(qp, hdr, wr);
1891 break;
1892 case IB_WR_RDMA_WRITE_WITH_IMM:
1893 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
1894 hdr->immdt = ntohl(wr->ex.imm_data);
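	/* fall through to IB_WR_RDMA_WRITE */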
1895 case IB_WR_RDMA_WRITE:
1896 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
1897 status = ocrdma_build_write(qp, hdr, wr);
1898 break;
1899 case IB_WR_RDMA_READ_WITH_INV:
1900 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
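	/* fall through to IB_WR_RDMA_READ */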
1901 case IB_WR_RDMA_READ:
1902 ocrdma_build_read(qp, hdr, wr);
1903 break;
1904 case IB_WR_LOCAL_INV:
1905 hdr->cw |=
1906 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
1907 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
1908 sizeof(struct ocrdma_sge)) /
1909 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
1910 hdr->lkey = wr->ex.invalidate_rkey;
1911 break;
1912 case IB_WR_FAST_REG_MR:
1913 status = ocrdma_build_fr(qp, hdr, wr);
1914 break;
1915 default:
1916 status = -EINVAL;
1917 break;
1918 }
1919 if (status) {
1920 *bad_wr = wr;
1921 break;
1922 }
1923 if (wr->send_flags & IB_SEND_SIGNALED)
1924 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
1925 else
1926 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
1927 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
1928 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
1929 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
1930 /* make sure wqe is written before adapter can access it */
1931 wmb();
1932 /* inform hw to start processing it */
1933 ocrdma_ring_sq_db(qp);
1934
1935 /* update pointer, counter for next wr */
1936 ocrdma_hwq_inc_head(&qp->sq);
1937 wr = wr->next;
1938 }
1939 spin_unlock_irqrestore(&qp->q_lock, flags);
1940 return status;
1941}
1942
1943static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
1944{
1945 u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
1946
1947 if (qp->state != OCRDMA_QPS_INIT)
1948 iowrite32(val, qp->rq_db);
1949 else
1950 qp->db_cache++;
1951}
1952
1953static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
1954 u16 tag)
1955{
1956 u32 wqe_size = 0;
1957 struct ocrdma_sge *sge;
1958 if (wr->num_sge)
1959 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
1960 else
1961 wqe_size = sizeof(*sge) + sizeof(*rqe);
1962
1963 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
1964 OCRDMA_WQE_SIZE_SHIFT);
1965 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
1966 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1967 rqe->total_len = 0;
1968 rqe->rsvd_tag = tag;
1969 sge = (struct ocrdma_sge *)(rqe + 1);
1970 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
1971 ocrdma_cpu_to_le32(rqe, wqe_size);
1972}
1973
1974int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1975 struct ib_recv_wr **bad_wr)
1976{
1977 int status = 0;
1978 unsigned long flags;
1979 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1980 struct ocrdma_hdr_wqe *rqe;
1981
1982 spin_lock_irqsave(&qp->q_lock, flags);
1983 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
1984 spin_unlock_irqrestore(&qp->q_lock, flags);
1985 *bad_wr = wr;
1986 return -EINVAL;
1987 }
1988 while (wr) {
1989 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
1990 wr->num_sge > qp->rq.max_sges) {
1991 *bad_wr = wr;
1992 status = -ENOMEM;
1993 break;
1994 }
1995 rqe = ocrdma_hwq_head(&qp->rq);
1996 ocrdma_build_rqe(rqe, wr, 0);
1997
1998 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
1999 /* make sure rqe is written before adapter can access it */
2000 wmb();
2001
2002 /* inform hw to start processing it */
2003 ocrdma_ring_rq_db(qp);
2004
2005 /* update pointer, counter for next wr */
2006 ocrdma_hwq_inc_head(&qp->rq);
2007 wr = wr->next;
2008 }
2009 spin_unlock_irqrestore(&qp->q_lock, flags);
2010 return status;
2011}
2012
2013/* CQEs for an SRQ's RQEs can potentially arrive out of order.
2014 * The index gives the entry in the shadow table where the wr_id
2015 * is stored; the tag/index is returned in the CQE so the
2016 * corresponding RQE can be referenced back.
2017 */
2018static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2019{
2020 int row = 0;
2021 int indx = 0;
2022
2023 for (row = 0; row < srq->bit_fields_len; row++) {
2024 if (srq->idx_bit_fields[row]) {
2025 indx = ffs(srq->idx_bit_fields[row]);
2026 indx = (row * 32) + (indx - 1);
2027 if (indx >= srq->rq.max_cnt)
2028 BUG();
2029 ocrdma_srq_toggle_bit(srq, indx);
2030 break;
2031 }
2032 }
2033
2034 if (row == srq->bit_fields_len)
2035 BUG();
2036 return indx;
2037}
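/*
 * Editor's note (worked example): idx_bit_fields is a bitmap of free shadow
 * table slots, initialised to all ones in ocrdma_create_srq(). With
 * rq.max_cnt = 100 the table spans bit_fields_len = 4 words; ffs() picks the
 * lowest set (free) bit, ocrdma_srq_toggle_bit() marks it busy here, and
 * ocrdma_update_free_srq_cqe() toggles it free again once the CQE carrying
 * that tag has been polled.
 */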
2038
2039static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2040{
2041 u32 val = srq->rq.dbid | (1 << 16);
2042
2043 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2044}
2045
2046int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2047 struct ib_recv_wr **bad_wr)
2048{
2049 int status = 0;
2050 unsigned long flags;
2051 struct ocrdma_srq *srq;
2052 struct ocrdma_hdr_wqe *rqe;
2053 u16 tag;
2054
2055 srq = get_ocrdma_srq(ibsrq);
2056
2057 spin_lock_irqsave(&srq->q_lock, flags);
2058 while (wr) {
2059 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2060 wr->num_sge > srq->rq.max_sges) {
2061 status = -ENOMEM;
2062 *bad_wr = wr;
2063 break;
2064 }
2065 tag = ocrdma_srq_get_idx(srq);
2066 rqe = ocrdma_hwq_head(&srq->rq);
2067 ocrdma_build_rqe(rqe, wr, tag);
2068
2069 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2070 /* make sure rqe is written before adapter can perform DMA */
2071 wmb();
2072 /* inform hw to start processing it */
2073 ocrdma_ring_srq_db(srq);
2074 /* update pointer, counter for next wr */
2075 ocrdma_hwq_inc_head(&srq->rq);
2076 wr = wr->next;
2077 }
2078 spin_unlock_irqrestore(&srq->q_lock, flags);
2079 return status;
2080}
2081
2082static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2083{
2084 enum ib_wc_status ibwc_status;
2085
2086 switch (status) {
2087 case OCRDMA_CQE_GENERAL_ERR:
2088 ibwc_status = IB_WC_GENERAL_ERR;
2089 break;
2090 case OCRDMA_CQE_LOC_LEN_ERR:
2091 ibwc_status = IB_WC_LOC_LEN_ERR;
2092 break;
2093 case OCRDMA_CQE_LOC_QP_OP_ERR:
2094 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2095 break;
2096 case OCRDMA_CQE_LOC_EEC_OP_ERR:
2097 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2098 break;
2099 case OCRDMA_CQE_LOC_PROT_ERR:
2100 ibwc_status = IB_WC_LOC_PROT_ERR;
2101 break;
2102 case OCRDMA_CQE_WR_FLUSH_ERR:
2103 ibwc_status = IB_WC_WR_FLUSH_ERR;
2104 break;
2105 case OCRDMA_CQE_MW_BIND_ERR:
2106 ibwc_status = IB_WC_MW_BIND_ERR;
2107 break;
2108 case OCRDMA_CQE_BAD_RESP_ERR:
2109 ibwc_status = IB_WC_BAD_RESP_ERR;
2110 break;
2111 case OCRDMA_CQE_LOC_ACCESS_ERR:
2112 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2113 break;
2114 case OCRDMA_CQE_REM_INV_REQ_ERR:
2115 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2116 break;
2117 case OCRDMA_CQE_REM_ACCESS_ERR:
2118 ibwc_status = IB_WC_REM_ACCESS_ERR;
2119 break;
2120 case OCRDMA_CQE_REM_OP_ERR:
2121 ibwc_status = IB_WC_REM_OP_ERR;
2122 break;
2123 case OCRDMA_CQE_RETRY_EXC_ERR:
2124 ibwc_status = IB_WC_RETRY_EXC_ERR;
2125 break;
2126 case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2127 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2128 break;
2129 case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2130 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2131 break;
2132 case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2133 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2134 break;
2135 case OCRDMA_CQE_REM_ABORT_ERR:
2136 ibwc_status = IB_WC_REM_ABORT_ERR;
2137 break;
2138 case OCRDMA_CQE_INV_EECN_ERR:
2139 ibwc_status = IB_WC_INV_EECN_ERR;
2140 break;
2141 case OCRDMA_CQE_INV_EEC_STATE_ERR:
2142 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2143 break;
2144 case OCRDMA_CQE_FATAL_ERR:
2145 ibwc_status = IB_WC_FATAL_ERR;
2146 break;
2147 case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2148 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2149 break;
2150 default:
2151 ibwc_status = IB_WC_GENERAL_ERR;
2152 break;
2153 }
2154 return ibwc_status;
2155}
2156
2157static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2158 u32 wqe_idx)
2159{
2160 struct ocrdma_hdr_wqe *hdr;
2161 struct ocrdma_sge *rw;
2162 int opcode;
2163
2164 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2165
2166 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2167 /* Undo the hdr->cw swap */
2168 opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2169 switch (opcode) {
2170 case OCRDMA_WRITE:
2171 ibwc->opcode = IB_WC_RDMA_WRITE;
2172 break;
2173 case OCRDMA_READ:
2174 rw = (struct ocrdma_sge *)(hdr + 1);
2175 ibwc->opcode = IB_WC_RDMA_READ;
2176 ibwc->byte_len = rw->len;
2177 break;
2178 case OCRDMA_SEND:
2179 ibwc->opcode = IB_WC_SEND;
2180 break;
2181 case OCRDMA_FR_MR:
2182 ibwc->opcode = IB_WC_FAST_REG_MR;
2183 break;
2184 case OCRDMA_LKEY_INV:
2185 ibwc->opcode = IB_WC_LOCAL_INV;
2186 break;
2187 default:
2188 ibwc->status = IB_WC_GENERAL_ERR;
2189 pr_err("%s() invalid opcode received = 0x%x\n",
2190 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2191 break;
2192 }
2193}
2194
2195static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2196 struct ocrdma_cqe *cqe)
2197{
2198 if (is_cqe_for_sq(cqe)) {
2199 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2200 cqe->flags_status_srcqpn) &
2201 ~OCRDMA_CQE_STATUS_MASK);
2202 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2203 cqe->flags_status_srcqpn) |
2204 (OCRDMA_CQE_WR_FLUSH_ERR <<
2205 OCRDMA_CQE_STATUS_SHIFT));
2206 } else {
2207 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2208 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2209 cqe->flags_status_srcqpn) &
2210 ~OCRDMA_CQE_UD_STATUS_MASK);
2211 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2212 cqe->flags_status_srcqpn) |
2213 (OCRDMA_CQE_WR_FLUSH_ERR <<
2214 OCRDMA_CQE_UD_STATUS_SHIFT));
2215 } else {
2216 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2217 cqe->flags_status_srcqpn) &
2218 ~OCRDMA_CQE_STATUS_MASK);
2219 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2220 cqe->flags_status_srcqpn) |
2221 (OCRDMA_CQE_WR_FLUSH_ERR <<
2222 OCRDMA_CQE_STATUS_SHIFT));
2223 }
2224 }
2225}
2226
2227static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2228 struct ocrdma_qp *qp, int status)
2229{
2230 bool expand = false;
2231
2232 ibwc->byte_len = 0;
2233 ibwc->qp = &qp->ibqp;
2234 ibwc->status = ocrdma_to_ibwc_err(status);
2235
2236 ocrdma_flush_qp(qp);
2237 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2238
2239 /* if WQEs/RQEs are still pending for which CQEs need to be
2240 * returned, trigger expanding (inflating) this CQE into them.
2241 */
2242 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2243 expand = true;
2244 ocrdma_set_cqe_status_flushed(qp, cqe);
2245 }
2246 return expand;
2247}
2248
2249static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2250 struct ocrdma_qp *qp, int status)
2251{
2252 ibwc->opcode = IB_WC_RECV;
2253 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2254 ocrdma_hwq_inc_tail(&qp->rq);
2255
2256 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2257}
2258
2259static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2260 struct ocrdma_qp *qp, int status)
2261{
2262 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2263 ocrdma_hwq_inc_tail(&qp->sq);
2264
2265 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2266}
2267
2268
2269static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2270 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2271 bool *polled, bool *stop)
2272{
2273 bool expand;
2274 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2275 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2276
2277 /* when the hw SQ is empty but the RQ is not, keep the CQE
2278 * so that the CQ event is raised again.
2279 */
2280 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2281 /* when the RQ and SQ share the same CQ, it is safe to
2282 * return flush CQEs for the RQEs.
2283 */
2284 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2285 *polled = true;
2286 status = OCRDMA_CQE_WR_FLUSH_ERR;
2287 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2288 } else {
2289 /* stop processing further cqe as this cqe is used for
2290 * triggering cq event on buddy cq of RQ.
2291 * When QP is destroyed, this cqe will be removed
2292 * from the cq's hardware q.
2293 */
2294 *polled = false;
2295 *stop = true;
2296 expand = false;
2297 }
2298 } else {
2299 *polled = true;
2300 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2301 }
2302 return expand;
2303}
2304
2305static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2306 struct ocrdma_cqe *cqe,
2307 struct ib_wc *ibwc, bool *polled)
2308{
2309 bool expand = false;
2310 int tail = qp->sq.tail;
2311 u32 wqe_idx;
2312
2313 if (!qp->wqe_wr_id_tbl[tail].signaled) {
2314 *polled = false; /* WC cannot be consumed yet */
2315 } else {
2316 ibwc->status = IB_WC_SUCCESS;
2317 ibwc->wc_flags = 0;
2318 ibwc->qp = &qp->ibqp;
2319 ocrdma_update_wc(qp, ibwc, tail);
2320 *polled = true;
2321 }
2322 wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2323 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2324 if (tail != wqe_idx)
2325 expand = true; /* Coalesced CQE can't be consumed yet */
2326
2327 ocrdma_hwq_inc_tail(&qp->sq);
2328 return expand;
2329}
2330
2331static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2332 struct ib_wc *ibwc, bool *polled, bool *stop)
2333{
2334 int status;
2335 bool expand;
2336
2337 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2338 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2339
2340 if (status == OCRDMA_CQE_SUCCESS)
2341 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2342 else
2343 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2344 return expand;
2345}
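/*
 * Editor's note (summary of the polling flags, as inferred from the code):
 * "polled" means an ib_wc entry was produced for the caller, "expand" means
 * the same hardware CQE must be revisited to emit further flush/coalesced
 * completions, and "stop" means polling of this CQ must halt because the
 * CQE is being kept to trigger the buddy CQ's event.
 */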
2346
2347static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2348{
2349 int status;
2350
2351 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2352 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2353 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2354 OCRDMA_CQE_SRCQP_MASK;
2355 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2356 OCRDMA_CQE_PKEY_MASK;
2357 ibwc->wc_flags = IB_WC_GRH;
2358 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2359 OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2360 return status;
2361}
2362
2363static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2364 struct ocrdma_cqe *cqe,
2365 struct ocrdma_qp *qp)
2366{
2367 unsigned long flags;
2368 struct ocrdma_srq *srq;
2369 u32 wqe_idx;
2370
2371 srq = get_ocrdma_srq(qp->ibqp.srq);
2372 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2373 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2374 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2375 spin_lock_irqsave(&srq->q_lock, flags);
2376 ocrdma_srq_toggle_bit(srq, wqe_idx);
2377 spin_unlock_irqrestore(&srq->q_lock, flags);
2378 ocrdma_hwq_inc_tail(&srq->rq);
2379}
2380
2381static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2382 struct ib_wc *ibwc, bool *polled, bool *stop,
2383 int status)
2384{
2385 bool expand;
2386
2387 /* when the hw RQ is empty but the SQ is not, keep the CQE
2388 * so that the CQ event is raised again.
2389 */
2390 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2391 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2392 *polled = true;
2393 status = OCRDMA_CQE_WR_FLUSH_ERR;
2394 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2395 } else {
2396 *polled = false;
2397 *stop = true;
2398 expand = false;
2399 }
2400 } else {
2401 *polled = true;
2402 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2403 }
2404 return expand;
2405}
2406
2407static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2408 struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2409{
2410 ibwc->opcode = IB_WC_RECV;
2411 ibwc->qp = &qp->ibqp;
2412 ibwc->status = IB_WC_SUCCESS;
2413
2414 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2415 ocrdma_update_ud_rcqe(ibwc, cqe);
2416 else
2417 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2418
2419 if (is_cqe_imm(cqe)) {
2420 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2421 ibwc->wc_flags |= IB_WC_WITH_IMM;
2422 } else if (is_cqe_wr_imm(cqe)) {
2423 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2424 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2425 ibwc->wc_flags |= IB_WC_WITH_IMM;
2426 } else if (is_cqe_invalidated(cqe)) {
2427 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2428 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2429 }
2430 if (qp->ibqp.srq) {
2431 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2432 } else {
2433 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2434 ocrdma_hwq_inc_tail(&qp->rq);
2435 }
2436}
2437
2438static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2439 struct ib_wc *ibwc, bool *polled, bool *stop)
2440{
2441 int status;
2442 bool expand = false;
2443
2444 ibwc->wc_flags = 0;
2445 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2446 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2447 OCRDMA_CQE_UD_STATUS_MASK) >>
2448 OCRDMA_CQE_UD_STATUS_SHIFT;
2449 } else {
2450 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2451 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2452 }
2453
2454 if (status == OCRDMA_CQE_SUCCESS) {
2455 *polled = true;
2456 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2457 } else {
2458 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2459 status);
2460 }
2461 return expand;
2462}
2463
2464static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2465 u16 cur_getp)
2466{
2467 if (cq->phase_change) {
2468 if (cur_getp == 0)
2469 cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
2470 } else {
2471 /* clear valid bit */
2472 cqe->flags_status_srcqpn = 0;
2473 }
2474}
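/*
 * Editor's note (illustrative): with phase_change enabled the expected
 * OCRDMA_CQE_VALID polarity flips each time the consumer wraps from the
 * last CQ entry back to index 0, so CQEs left over from the previous pass
 * are no longer seen as valid; without phase_change the valid bit is simply
 * cleared in place once the CQE has been consumed.
 */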
2475
2476static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2477 struct ib_wc *ibwc)
2478{
2479 u16 qpn = 0;
2480 int i = 0;
2481 bool expand = false;
2482 int polled_hw_cqes = 0;
2483 struct ocrdma_qp *qp = NULL;
2484 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
2485 struct ocrdma_cqe *cqe;
2486 u16 cur_getp; bool polled = false; bool stop = false;
2487
2488 cur_getp = cq->getp;
2489 while (num_entries) {
2490 cqe = cq->va + cur_getp;
2491 /* check whether valid cqe or not */
2492 if (!is_cqe_valid(cq, cqe))
2493 break;
2494 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2495 /* ignore discarded cqe */
2496 if (qpn == 0)
2497 goto skip_cqe;
2498 qp = dev->qp_tbl[qpn];
2499 BUG_ON(qp == NULL);
2500
2501 if (is_cqe_for_sq(cqe)) {
2502 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2503 &stop);
2504 } else {
2505 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2506 &stop);
2507 }
2508 if (expand)
2509 goto expand_cqe;
2510 if (stop)
2511 goto stop_cqe;
2512 /* clear qpn to avoid duplicate processing by discard_cqe() */
2513 cqe->cmn.qpn = 0;
2514skip_cqe:
2515 polled_hw_cqes += 1;
2516 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2517 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2518expand_cqe:
2519 if (polled) {
2520 num_entries -= 1;
2521 i += 1;
2522 ibwc = ibwc + 1;
2523 polled = false;
2524 }
2525 }
2526stop_cqe:
2527 cq->getp = cur_getp;
2528 if (polled_hw_cqes || expand || stop) {
2529 ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
2530 polled_hw_cqes);
2531 }
2532 return i;
2533}
2534
2535/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2536static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2537 struct ocrdma_qp *qp, struct ib_wc *ibwc)
2538{
2539 int err_cqes = 0;
2540
2541 while (num_entries) {
2542 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2543 break;
2544 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2545 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2546 ocrdma_hwq_inc_tail(&qp->sq);
2547 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2548 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2549 ocrdma_hwq_inc_tail(&qp->rq);
2550 } else {
2551 return err_cqes;
2552 }
2553 ibwc->byte_len = 0;
2554 ibwc->status = IB_WC_WR_FLUSH_ERR;
2555 ibwc = ibwc + 1;
2556 err_cqes += 1;
2557 num_entries -= 1;
2558 }
2559 return err_cqes;
2560}
2561
2562int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2563{
2564 int cqes_to_poll = num_entries;
2565 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2566 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2567 int num_os_cqe = 0, err_cqes = 0;
2568 struct ocrdma_qp *qp;
2569 unsigned long flags;
2570
2571 /* poll cqes from adapter CQ */
2572 spin_lock_irqsave(&cq->cq_lock, flags);
2573 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2574 spin_unlock_irqrestore(&cq->cq_lock, flags);
2575 cqes_to_poll -= num_os_cqe;
2576
2577 if (cqes_to_poll) {
2578 wc = wc + num_os_cqe;
2579 /* the adapter returns a single error CQE when a QP moves to
2580 * the error state. So, for every QP whose SQ or RQ uses this
2581 * CQ, insert error CQEs with wc_status FLUSHED for its
2582 * pending WQEs and RQEs.
2583 */
2584 spin_lock_irqsave(&dev->flush_q_lock, flags);
2585 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2586 if (cqes_to_poll == 0)
2587 break;
2588 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2589 cqes_to_poll -= err_cqes;
2590 num_os_cqe += err_cqes;
2591 wc = wc + err_cqes;
2592 }
2593 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2594 }
2595 return num_os_cqe;
2596}
2597
2598int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2599{
2600 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2601 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2602 u16 cq_id;
2603 u16 cur_getp;
2604 struct ocrdma_cqe *cqe;
2605 unsigned long flags;
2606
2607 cq_id = cq->id;
2608
2609 spin_lock_irqsave(&cq->cq_lock, flags);
2610 if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
2611 cq->armed = true;
2612 if (cq_flags & IB_CQ_SOLICITED)
2613 cq->solicited = true;
2614
2615 cur_getp = cq->getp;
2616 cqe = cq->va + cur_getp;
2617
2618 /* check whether any valid CQE exists; if not, it is safe to
2619 * arm. If a CQE is not yet consumed, let it be consumed first
2620 * and arm afterwards to avoid false interrupts.
2621 */
2622 if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
2623 cq->arm_needed = false;
2624 ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
2625 }
2626 spin_unlock_irqrestore(&cq->cq_lock, flags);
2627 return 0;
2628}
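/*
 * Editor's note (usage sketch, standard verbs pattern rather than anything
 * specific to this driver): a consumer typically drains the CQ with
 * ib_poll_cq(), arms it with ib_req_notify_cq() (which lands here), and then
 * polls once more to catch completions that slipped in before the arm took
 * effect; the is_cqe_valid() check above defers ringing the doorbell while
 * an unconsumed CQE is still pending.
 */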
2629
2630struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
2631{
2632 int status;
2633 struct ocrdma_mr *mr;
2634 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2635 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2636
2637 if (max_page_list_len > dev->attr.max_pages_per_frmr)
2638 return ERR_PTR(-EINVAL);
2639
2640 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2641 if (!mr)
2642 return ERR_PTR(-ENOMEM);
2643
2644 status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
2645 if (status)
2646 goto pbl_err;
2647 mr->hwmr.fr_mr = 1;
2648 mr->hwmr.remote_rd = 0;
2649 mr->hwmr.remote_wr = 0;
2650 mr->hwmr.local_rd = 0;
2651 mr->hwmr.local_wr = 0;
2652 mr->hwmr.mw_bind = 0;
2653 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
2654 if (status)
2655 goto pbl_err;
2656 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
2657 if (status)
2658 goto mbx_err;
2659 mr->ibmr.rkey = mr->hwmr.lkey;
2660 mr->ibmr.lkey = mr->hwmr.lkey;
2661 dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (u64) mr;
2662 return &mr->ibmr;
2663mbx_err:
2664 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
2665pbl_err:
2666 kfree(mr);
2667 return ERR_PTR(-ENOMEM);
2668}
2669
2670struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
2671 *ibdev,
2672 int page_list_len)
2673{
2674 struct ib_fast_reg_page_list *frmr_list;
2675 int size;
2676
2677 size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
2678 frmr_list = kzalloc(size, GFP_KERNEL);
2679 if (!frmr_list)
2680 return ERR_PTR(-ENOMEM);
2681 frmr_list->page_list = (u64 *)(frmr_list + 1);
2682 return frmr_list;
2683}
2684
2685void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
2686{
2687 kfree(page_list);
2688}
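/*
 * Editor's note (hedged usage sketch, not part of the driver): a ULP would
 * register a fast-register MR roughly as follows -- the field names come
 * from the ib_verbs API of this kernel generation and the flow mirrors what
 * ocrdma_build_fr() above expects:
 *
 *	struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, 16);
 *	struct ib_fast_reg_page_list *pl =
 *		ib_alloc_fast_reg_page_list(pd->device, 16);
 *	struct ib_send_wr fr_wr = { 0 }, *bad_wr;
 *
 *	pl->page_list[0] = dma_addr;		// page-aligned DMA address
 *	fr_wr.opcode = IB_WR_FAST_REG_MR;
 *	fr_wr.wr.fast_reg.iova_start = dma_addr;
 *	fr_wr.wr.fast_reg.page_list = pl;
 *	fr_wr.wr.fast_reg.page_list_len = 1;
 *	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 *	fr_wr.wr.fast_reg.length = PAGE_SIZE;
 *	fr_wr.wr.fast_reg.rkey = mr->rkey;
 *	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
 *	ib_post_send(qp, &fr_wr, &bad_wr);
 */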