/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"

int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
		     int index, union ib_gid *sgid)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	memset(sgid, 0, sizeof(*sgid));
	if (index >= OCRDMA_MAX_SGID)
		return -EINVAL;

	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

	return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	memset(attr, 0, sizeof *attr);
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = ~0ull;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = 0;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = OCRDMA_MAX_AH;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
					IB_DEVICE_RC_RNR_NAK_GEN |
					IB_DEVICE_SHUTDOWN_PORT |
					IB_DEVICE_SYS_IMAGE_GUID |
					IB_DEVICE_LOCAL_DMA_LKEY |
					IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
	attr->max_sge_rd = dev->attr.max_rdma_sge;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = 0;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = 0;
	attr->max_pkeys = 1;
	return 0;
}

int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
		       dev->id, port);
		return -EINVAL;
	}
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	props->active_width = IB_WIDTH_1X;
	props->active_speed = 4;
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
		return -EINVAL;
	}
	return 0;
}

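/* Track the (physical address, length) pairs that a user context is
 * allowed to mmap(); ocrdma_mmap() validates incoming requests against
 * this per-context list.
 */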
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

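/* Allocate a PD through a mailbox command; if the command fails while
 * DPP is enabled, retry the allocation once with DPP disabled.
 */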
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
					  struct ocrdma_ucontext *uctx,
					  struct ib_udata *udata)
{
	struct ocrdma_pd *pd = NULL;
	int status = 0;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	if (udata && uctx) {
		pd->dpp_enabled =
			dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY;
		pd->num_dpp_qp =
			pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
	}

retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		} else {
			kfree(pd);
			return ERR_PTR(status);
		}
	}

	return pd;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
				 struct ocrdma_pd *pd)
{
	return (uctx->cntxt_pd == pd ? true : false);
}

static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
			      struct ocrdma_pd *pd)
{
	int status = 0;

	status = ocrdma_mbx_dealloc_pd(dev, pd);
	kfree(pd);
	return status;
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
				    struct ocrdma_ucontext *uctx,
				    struct ib_udata *udata)
{
	int status = 0;

	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(uctx->cntxt_pd)) {
		status = PTR_ERR(uctx->cntxt_pd);
		uctx->cntxt_pd = NULL;
		goto err;
	}

	uctx->cntxt_pd->uctx = uctx;
	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
	return status;
}

static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	int status = 0;
	struct ocrdma_pd *pd = uctx->cntxt_pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	BUG_ON(uctx->pd_in_use);
	uctx->cntxt_pd = NULL;
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = NULL;

	mutex_lock(&uctx->mm_list_lock);
	if (!uctx->pd_in_use) {
		uctx->pd_in_use = true;
		pd = uctx->cntxt_pd;
	}
	mutex_unlock(&uctx->mm_list_lock);

	return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	mutex_lock(&uctx->mm_list_lock);
	uctx->pd_in_use = false;
	mutex_unlock(&uctx->mm_list_lock);
}

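/* Allocate per-context resources: a DMA-coherent AH table registered for
 * mmap(), the default context PD, and the response copied to user space.
 */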
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return ERR_PTR(-EFAULT);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	memset(ctx->ah_tbl.va, 0, map_len);
	ctx->ah_tbl.len = map_len;

	memset(&resp, 0, sizeof(resp));
	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = ctx->ah_tbl.pa;

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;

	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
	if (status)
		goto pd_err;

	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return &ctx->ibucontext;

cpy_err:
pd_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	kfree(ctx);
	return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	int status = 0;
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	status = ocrdma_dealloc_ucontext_pd(uctx);

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return status;
}

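/* mmap() handler: doorbell pages are mapped non-cached and DPP pages
 * write-combined, provided the range was registered for this context;
 * anything else (queue memory) falls back to a normal mapping.
 */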
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status = 0;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
		(len <= dev->nic_info.db_page_size)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
			dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len, vma->vm_page_prot);
	}
	return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	if (pd->dpp_enabled)
		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
	return status;
}

struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	struct ocrdma_ucontext *uctx = NULL;
	int status;
	u8 is_uctx_pd = false;

	if (udata && context) {
		uctx = get_ocrdma_ucontext(context);
		pd = ocrdma_get_ucontext_pd(uctx);
		if (pd) {
			is_uctx_pd = true;
			goto pd_mapping;
		}
	}

	pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(pd)) {
		status = PTR_ERR(pd);
		goto exit;
	}

pd_mapping:
	if (udata && context) {
		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	if (is_uctx_pd) {
		ocrdma_release_ucontext_pd(uctx);
	} else {
		status = ocrdma_mbx_dealloc_pd(dev, pd);
		kfree(pd);
	}
exit:
	return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_ucontext *uctx = NULL;
	int status = 0;
	u64 usr_db;

	uctx = pd->uctx;
	if (uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = ocrdma_get_db_addr(dev, pd->id);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

		if (is_ucontext_pd(uctx, pd)) {
			ocrdma_release_ucontext_pd(uctx);
			return status;
		}
	}
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}

static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}

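/* Pick a PBL size (power-of-two multiple of the minimum HW page size)
 * large enough that the number of PBLs needed to hold num_pbes entries
 * stays below the device limit.
 */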
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
				mr->num_pbls, GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		memset(va, 0, dma_len);
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

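/* Walk the user memory region's DMA-mapped chunks and write one PBE
 * (physical buffer entry) per page into the PBL table.
 */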
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct ib_umem_chunk *chunk;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	list_for_each_entry(chunk, &umem->chunk_list, list) {
		/* get all the dma regions from the chunk. */
		for (i = 0; i < chunk->nmap; i++) {
			pages = sg_dma_len(&chunk->page_list[i]) >> shift;
			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
				/* store the page address in pbe */
				pbe->pa_lo =
				    cpu_to_le32(sg_dma_address
						(&chunk->page_list[i]) +
						(umem->page_size * pg_cnt));
				pbe->pa_hi =
				    cpu_to_le32(upper_32_bits
						((sg_dma_address
						  (&chunk->page_list[i]) +
						  umem->page_size * pg_cnt)));
				pbe_cnt += 1;
				total_num_pbes += 1;
				pbe++;

				/* if done building pbes, issue the mbx cmd. */
				if (total_num_pbes == num_pbes)
					return;

				/* if the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
					(mr->hwmr.pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct ocrdma_pbe *)pbl_tbl->va;
					pbe_cnt = 0;
				}
			}
		}
	}
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = mr->umem->page_size;
	mr->hwmr.fbo = mr->umem->offset;
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
	int status;

	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	if (mr->hwmr.fr_mr == 0)
		ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return status;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
	struct ocrdma_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = cq->pa;
	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_ucontext *uctx = NULL;
	u16 pd_id = 0;
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else
		ureq.dpp_cq = 0;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);

	if (ib_ctx) {
		uctx = get_ocrdma_ucontext(ib_ctx);
		pd_id = uctx->cntxt_pd->id;
	}

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	cq->arm_needed = true;
	dev->cq_tbl[cq->id] = cq;

	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	int status;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int pdid = 0;

	status = ocrdma_mbx_destroy_cq(dev, cq);

	if (cq->ucontext) {
		pdid = cq->ucontext->cntxt_pd->id;
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext,
				ocrdma_get_db_addr(dev, pdid),
				dev->nic_info.db_page_size);
	}
	dev->cq_tbl[cq->id] = NULL;

	kfree(cq);
	return status;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		pr_err
		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		     __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

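/* Return SQ/RQ buffer addresses, doorbell page details and DPP credits
 * to user space, and register the queue pages for mmap().
 */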
static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status = 0;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = qp->sq.pa;
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = qp->rq.pa;
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
		uresp.db_shift = 24;
	} else {
		uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

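/* Doorbell register offsets within the PD's doorbell page differ between
 * GEN2 and earlier adapter families.
 */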
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_RQ_OFFSET;
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}


static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	qp->dev = dev;
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
					OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
					ureq.dpp_cq_id,
					&dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user space QP's wr_id table are managed in library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

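/* If doorbell writes for posted RQEs were cached in qp->db_cache, ring
 * the RQ doorbell now and clear the cached count.
 */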
static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
{
	if (qp->db_cache) {
		u32 val = qp->rq.dbid | (qp->db_cache <<
				ocrdma_get_num_posted_shift(qp));
		iowrite32(val, qp->rq_db);
		qp->db_cache = 0;
	}
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if new and previous states are same hw doesn't need to
	 * know about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
	if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
		ocrdma_flush_rq_db(qp);

	return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	/* synchronize with multiple contexts trying to change/retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = qp->dev;

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
	qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
	qp_attr->path_mtu =
		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				       OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				       OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
	       sizeof(params.dgid));
	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
					  OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
					      OCRDMA_QP_PARAMS_TCLASS_SHIFT;

	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	qp_attr->ah_attr.port_num = 1;
	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
			       OCRDMA_QP_PARAMS_SL_MASK) >>
			       OCRDMA_QP_PARAMS_SL_SHIFT;
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
	    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
					OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
mbx_err:
	return status;
}

static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
{
	int i = idx / 32;
	unsigned int mask = (1 << (idx % 32));

	if (srq->idx_bit_fields[i] & mask)
		srq->idx_bit_fields[i] &= ~mask;
	else
		srq->idx_bit_fields[i] |= mask;
}

static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
}

static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head);
}

static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}

/* discard the cqe for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQE for a given qp,
	 * mark the matching one discarded by clearing qpn.
	 * ring the doorbell in the poll_cq() as
	 * we don't complete out of order cqe.
	 */

	cur_getp = cq->getp;
	/* find up to where we reap the cq. */
	stop_getp = cur_getp;
	do {
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		/* if (a) done reaping whole hw cq, or
		 * (b) qp_xq becomes empty.
		 * then exit
		 */
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* if previously discarded cqe found, skip that too. */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		/* mark cqe discarded so that it is not picked up later
		 * in the poll_cq().
		 */
		discard_cnt += 1;
		cqe->cmn.qpn = 0;
		if (is_cqe_for_sq(cqe)) {
			ocrdma_hwq_inc_tail(&qp->sq);
		} else {
			if (qp->srq) {
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, cur_getp);
				spin_unlock_irqrestore(&qp->srq->q_lock, flags);

			} else {
				ocrdma_hwq_inc_tail(&qp->rq);
			}
		}
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

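/* Remove the QP from its CQs' flush lists, synchronizing with any CQ
 * poll in progress via the device flush_q_lock.
 */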
void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = qp->dev;
	/* sync with any active CQ poll */

	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

1545int ocrdma_destroy_qp(struct ib_qp *ibqp)
1546{
1547 int status;
1548 struct ocrdma_pd *pd;
1549 struct ocrdma_qp *qp;
1550 struct ocrdma_dev *dev;
1551 struct ib_qp_attr attrs;
1552 int attr_mask = IB_QP_STATE;
Dan Carpenterd19081e2012-05-02 09:14:47 +03001553 unsigned long flags;
Parav Panditfe2caef2012-03-21 04:09:06 +05301554
1555 qp = get_ocrdma_qp(ibqp);
1556 dev = qp->dev;
1557
1558 attrs.qp_state = IB_QPS_ERR;
1559 pd = qp->pd;
1560
1561 /* change the QP state to ERROR */
1562 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1563
1564 /* ensure that CQEs for newly created QP (whose id may be same with
1565 * one which just getting destroyed are same), dont get
1566 * discarded until the old CQEs are discarded.
1567 */
1568 mutex_lock(&dev->dev_lock);
1569 status = ocrdma_mbx_destroy_qp(dev, qp);
1570
1571 /*
1572 * acquire CQ lock while destroy is in progress, in order to
1573 * protect against proessing in-flight CQEs for this QP.
1574 */
Dan Carpenterd19081e2012-05-02 09:14:47 +03001575 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05301576 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
Dan Carpenterd19081e2012-05-02 09:14:47 +03001577 spin_lock(&qp->rq_cq->cq_lock);
Parav Panditfe2caef2012-03-21 04:09:06 +05301578
1579 ocrdma_del_qpn_map(dev, qp);
1580
1581 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
Dan Carpenterd19081e2012-05-02 09:14:47 +03001582 spin_unlock(&qp->rq_cq->cq_lock);
1583 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05301584
1585 if (!pd->uctx) {
1586 ocrdma_discard_cqes(qp, qp->sq_cq);
1587 ocrdma_discard_cqes(qp, qp->rq_cq);
1588 }
1589 mutex_unlock(&dev->dev_lock);
1590
1591 if (pd->uctx) {
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301592 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1593 PAGE_ALIGN(qp->sq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301594 if (!qp->srq)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301595 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1596 PAGE_ALIGN(qp->rq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301597 }
1598
1599 ocrdma_del_flush_qp(qp);
1600
Parav Panditfe2caef2012-03-21 04:09:06 +05301601 kfree(qp->wqe_wr_id_tbl);
1602 kfree(qp->rqe_wr_id_tbl);
1603 kfree(qp);
1604 return status;
1605}
1606
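/* Return the SRQ's RQ queue address, doorbell page and doorbell layout
 * to user space and register the RQ buffer for mmap().
 */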
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301607static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1608 struct ib_udata *udata)
Parav Panditfe2caef2012-03-21 04:09:06 +05301609{
1610 int status;
1611 struct ocrdma_create_srq_uresp uresp;
1612
Dan Carpenter63ea3742013-07-29 22:34:29 +03001613 memset(&uresp, 0, sizeof(uresp));
Parav Panditfe2caef2012-03-21 04:09:06 +05301614 uresp.rq_dbid = srq->rq.dbid;
1615 uresp.num_rq_pages = 1;
1616 uresp.rq_page_addr[0] = srq->rq.pa;
1617 uresp.rq_page_size = srq->rq.len;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301618 uresp.db_page_addr = dev->nic_info.unmapped_db +
1619 (srq->pd->id * dev->nic_info.db_page_size);
1620 uresp.db_page_size = dev->nic_info.db_page_size;
Parav Panditfe2caef2012-03-21 04:09:06 +05301621 uresp.num_rqe_allocated = srq->rq.max_cnt;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301622 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05301623 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
Parav Panditfe2caef2012-03-21 04:09:06 +05301624 uresp.db_shift = 24;
1625 } else {
1626 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1627 uresp.db_shift = 16;
1628 }
1629
1630 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1631 if (status)
1632 return status;
1633 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1634 uresp.rq_page_size);
1635 if (status)
1636 return status;
1637 return status;
1638}
1639
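/* Create an SRQ after validating the attributes against the device
 * limits. Kernel consumers also get a wr_id shadow table and an index
 * bitmap used to hand back out-of-order completions; user consumers
 * get the queue details copied via udata.
 */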
1640struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1641 struct ib_srq_init_attr *init_attr,
1642 struct ib_udata *udata)
1643{
1644 int status = -ENOMEM;
1645 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301646 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301647 struct ocrdma_srq *srq;
1648
1649 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1650 return ERR_PTR(-EINVAL);
1651 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1652 return ERR_PTR(-EINVAL);
1653
1654 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1655 if (!srq)
1656 return ERR_PTR(status);
1657
1658 spin_lock_init(&srq->q_lock);
Parav Panditfe2caef2012-03-21 04:09:06 +05301659 srq->pd = pd;
1660 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301661 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
Parav Panditfe2caef2012-03-21 04:09:06 +05301662 if (status)
1663 goto err;
1664
	if (udata == NULL) {
		srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
					     GFP_KERNEL);
		if (srq->rqe_wr_id_tbl == NULL) {
			status = -ENOMEM;
			goto arm_err;
		}

		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
		    (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
		if (srq->idx_bit_fields == NULL) {
			status = -ENOMEM;
			goto arm_err;
		}
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
1679 }
1680
1681 if (init_attr->attr.srq_limit) {
1682 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1683 if (status)
1684 goto arm_err;
1685 }
1686
Parav Panditfe2caef2012-03-21 04:09:06 +05301687 if (udata) {
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301688 status = ocrdma_copy_srq_uresp(dev, srq, udata);
Parav Panditfe2caef2012-03-21 04:09:06 +05301689 if (status)
1690 goto arm_err;
1691 }
1692
Parav Panditfe2caef2012-03-21 04:09:06 +05301693 return &srq->ibsrq;
1694
1695arm_err:
1696 ocrdma_mbx_destroy_srq(dev, srq);
1697err:
1698 kfree(srq->rqe_wr_id_tbl);
1699 kfree(srq->idx_bit_fields);
1700 kfree(srq);
1701 return ERR_PTR(status);
1702}
1703
1704int ocrdma_modify_srq(struct ib_srq *ibsrq,
1705 struct ib_srq_attr *srq_attr,
1706 enum ib_srq_attr_mask srq_attr_mask,
1707 struct ib_udata *udata)
1708{
1709 int status = 0;
1710 struct ocrdma_srq *srq;
Parav Panditfe2caef2012-03-21 04:09:06 +05301711
1712 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301713 if (srq_attr_mask & IB_SRQ_MAX_WR)
1714 status = -EINVAL;
1715 else
1716 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1717 return status;
1718}
1719
1720int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1721{
1722 int status;
1723 struct ocrdma_srq *srq;
Parav Panditfe2caef2012-03-21 04:09:06 +05301724
1725 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301726 status = ocrdma_mbx_query_srq(srq, srq_attr);
1727 return status;
1728}
1729
1730int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1731{
1732 int status;
1733 struct ocrdma_srq *srq;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301734 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301735
1736 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301737
1738 status = ocrdma_mbx_destroy_srq(dev, srq);
1739
1740 if (srq->pd->uctx)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301741 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1742 PAGE_ALIGN(srq->rq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301743
Parav Panditfe2caef2012-03-21 04:09:06 +05301744 kfree(srq->idx_bit_fields);
1745 kfree(srq->rqe_wr_id_tbl);
1746 kfree(srq);
1747 return status;
1748}
1749
1750/* unprivileged verbs and their support functions. */
1751static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1752 struct ocrdma_hdr_wqe *hdr,
1753 struct ib_send_wr *wr)
1754{
1755 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1756 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1757 struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1758
1759 ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1760 if (qp->qp_type == IB_QPT_GSI)
1761 ud_hdr->qkey = qp->qkey;
1762 else
1763 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1764 ud_hdr->rsvd_ahid = ah->id;
1765}
1766
1767static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1768 struct ocrdma_sge *sge, int num_sge,
1769 struct ib_sge *sg_list)
1770{
1771 int i;
1772
1773 for (i = 0; i < num_sge; i++) {
1774 sge[i].lrkey = sg_list[i].lkey;
1775 sge[i].addr_lo = sg_list[i].addr;
1776 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1777 sge[i].len = sg_list[i].length;
1778 hdr->total_len += sg_list[i].length;
1779 }
1780 if (num_sge == 0)
1781 memset(sge, 0, sizeof(*sge));
1782}
1783
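/* Build the data segment of a WQE: either copy the first SGE's payload
 * inline into the WQE (IB_SEND_INLINE on non-UD QPs, limited to
 * max_inline_data) or attach the caller's SGEs, and encode the
 * resulting WQE size in the control word.
 */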
1784static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1785 struct ocrdma_hdr_wqe *hdr,
1786 struct ocrdma_sge *sge,
1787 struct ib_send_wr *wr, u32 wqe_size)
1788{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301789 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301790 if (wr->sg_list[0].length > qp->max_inline_data) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001791 pr_err("%s() supported_len=0x%x,\n"
			       " unsupported len req=0x%x\n", __func__,
1793 qp->max_inline_data, wr->sg_list[0].length);
Parav Panditfe2caef2012-03-21 04:09:06 +05301794 return -EINVAL;
1795 }
1796 memcpy(sge,
1797 (void *)(unsigned long)wr->sg_list[0].addr,
1798 wr->sg_list[0].length);
1799 hdr->total_len = wr->sg_list[0].length;
1800 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301801 if (0 == wr->sg_list[0].length)
1802 wqe_size += sizeof(struct ocrdma_sge);
Parav Panditfe2caef2012-03-21 04:09:06 +05301803 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1804 } else {
1805 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1806 if (wr->num_sge)
1807 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1808 else
1809 wqe_size += sizeof(struct ocrdma_sge);
1810 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1811 }
1812 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1813 return 0;
1814}
1815
1816static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1817 struct ib_send_wr *wr)
1818{
1819 int status;
1820 struct ocrdma_sge *sge;
1821 u32 wqe_size = sizeof(*hdr);
1822
1823 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
1824 ocrdma_build_ud_hdr(qp, hdr, wr);
1825 sge = (struct ocrdma_sge *)(hdr + 2);
1826 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301827 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301828 sge = (struct ocrdma_sge *)(hdr + 1);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301829 }
Parav Panditfe2caef2012-03-21 04:09:06 +05301830
1831 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1832 return status;
1833}
1834
1835static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1836 struct ib_send_wr *wr)
1837{
1838 int status;
1839 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1840 struct ocrdma_sge *sge = ext_rw + 1;
1841 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
1842
1843 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1844 if (status)
1845 return status;
1846 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1847 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1848 ext_rw->lrkey = wr->wr.rdma.rkey;
1849 ext_rw->len = hdr->total_len;
1850 return 0;
1851}
1852
1853static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1854 struct ib_send_wr *wr)
1855{
1856 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1857 struct ocrdma_sge *sge = ext_rw + 1;
1858 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
1859 sizeof(struct ocrdma_hdr_wqe);
1860
1861 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1862 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1863 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
1864 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1865
1866 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1867 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1868 ext_rw->lrkey = wr->wr.rdma.rkey;
1869 ext_rw->len = hdr->total_len;
1870}
1871
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05301872static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
1873 struct ocrdma_hw_mr *hwmr)
1874{
1875 int i;
1876 u64 buf_addr = 0;
1877 int num_pbes;
1878 struct ocrdma_pbe *pbe;
1879
1880 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1881 num_pbes = 0;
1882
1883 /* go through the OS phy regions & fill hw pbe entries into pbls. */
1884 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		/* one OS buf may need more than one pbe when the
		 * buffers are of different sizes; split the ib_buf
		 * into one or more pbes.
		 */
1889 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
1890 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
1891 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
1892 num_pbes += 1;
1893 pbe++;
1894
1895 /* if the pbl is full storing the pbes,
1896 * move to next pbl.
1897 */
1898 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
1899 pbl_tbl++;
1900 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1901 }
1902 }
1903 return;
1904}
1905
1906static int get_encoded_page_size(int pg_sz)
1907{
	/* Max page size is 256M (4096 << 16). */
1909 int i = 0;
1910 for (; i < 17; i++)
1911 if (pg_sz == (4096 << i))
1912 break;
1913 return i;
1914}
1915
1916
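/* Build a fast-register WQE: validate the page list against the device
 * limits, set the access flags, program the iova, fbo and encoded page
 * size, and copy the page list into the MR's PBLs.
 */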
1917static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1918 struct ib_send_wr *wr)
1919{
1920 u64 fbo;
1921 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
1922 struct ocrdma_mr *mr;
1923 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
1924
1925 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
1926
1927 if ((wr->wr.fast_reg.page_list_len >
1928 qp->dev->attr.max_pages_per_frmr) ||
1929 (wr->wr.fast_reg.length > 0xffffffffULL))
1930 return -EINVAL;
1931
1932 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
1933 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1934
1935 if (wr->wr.fast_reg.page_list_len == 0)
1936 BUG();
1937 if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
1938 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
1939 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
1940 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
1941 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
1942 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
1943 hdr->lkey = wr->wr.fast_reg.rkey;
1944 hdr->total_len = wr->wr.fast_reg.length;
1945
1946 fbo = wr->wr.fast_reg.iova_start -
1947 (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
1948
1949 fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
1950 fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
1951 fast_reg->fbo_hi = upper_32_bits(fbo);
1952 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
1953 fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
1954 fast_reg->size_sge =
1955 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
1956 mr = (struct ocrdma_mr *)qp->dev->stag_arr[(hdr->lkey >> 8) &
1957 (OCRDMA_MAX_STAG - 1)];
1958 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
1959 return 0;
1960}
1961
Parav Panditfe2caef2012-03-21 04:09:06 +05301962static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
1963{
1964 u32 val = qp->sq.dbid | (1 << 16);
1965
1966 iowrite32(val, qp->sq_db);
1967}
1968
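/* Post a chain of send WRs. A WQE is built at the SQ head for each WR,
 * converted to little endian and made visible with a write barrier
 * before the SQ doorbell is rung, so the adapter never sees a partially
 * written WQE.
 */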
1969int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1970 struct ib_send_wr **bad_wr)
1971{
1972 int status = 0;
1973 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1974 struct ocrdma_hdr_wqe *hdr;
1975 unsigned long flags;
1976
1977 spin_lock_irqsave(&qp->q_lock, flags);
1978 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
1979 spin_unlock_irqrestore(&qp->q_lock, flags);
Naresh Gottumukkalaf6ddcf72013-06-10 04:42:40 +00001980 *bad_wr = wr;
Parav Panditfe2caef2012-03-21 04:09:06 +05301981 return -EINVAL;
1982 }
1983
1984 while (wr) {
1985 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
1986 wr->num_sge > qp->sq.max_sges) {
Naresh Gottumukkalaf6ddcf72013-06-10 04:42:40 +00001987 *bad_wr = wr;
Parav Panditfe2caef2012-03-21 04:09:06 +05301988 status = -ENOMEM;
1989 break;
1990 }
1991 hdr = ocrdma_hwq_head(&qp->sq);
1992 hdr->cw = 0;
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05301993 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
Parav Panditfe2caef2012-03-21 04:09:06 +05301994 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
1995 if (wr->send_flags & IB_SEND_FENCE)
1996 hdr->cw |=
1997 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
1998 if (wr->send_flags & IB_SEND_SOLICITED)
1999 hdr->cw |=
2000 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2001 hdr->total_len = 0;
2002 switch (wr->opcode) {
2003 case IB_WR_SEND_WITH_IMM:
2004 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2005 hdr->immdt = ntohl(wr->ex.imm_data);
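			/* fall through */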
2006 case IB_WR_SEND:
2007 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2008 ocrdma_build_send(qp, hdr, wr);
2009 break;
2010 case IB_WR_SEND_WITH_INV:
2011 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2012 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2013 hdr->lkey = wr->ex.invalidate_rkey;
2014 status = ocrdma_build_send(qp, hdr, wr);
2015 break;
2016 case IB_WR_RDMA_WRITE_WITH_IMM:
2017 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2018 hdr->immdt = ntohl(wr->ex.imm_data);
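			/* fall through */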
2019 case IB_WR_RDMA_WRITE:
2020 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2021 status = ocrdma_build_write(qp, hdr, wr);
2022 break;
2023 case IB_WR_RDMA_READ_WITH_INV:
2024 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
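			/* fall through */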
2025 case IB_WR_RDMA_READ:
2026 ocrdma_build_read(qp, hdr, wr);
2027 break;
2028 case IB_WR_LOCAL_INV:
2029 hdr->cw |=
2030 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302031 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2032 sizeof(struct ocrdma_sge)) /
Parav Panditfe2caef2012-03-21 04:09:06 +05302033 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2034 hdr->lkey = wr->ex.invalidate_rkey;
2035 break;
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302036 case IB_WR_FAST_REG_MR:
2037 status = ocrdma_build_fr(qp, hdr, wr);
2038 break;
Parav Panditfe2caef2012-03-21 04:09:06 +05302039 default:
2040 status = -EINVAL;
2041 break;
2042 }
2043 if (status) {
2044 *bad_wr = wr;
2045 break;
2046 }
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05302047 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
Parav Panditfe2caef2012-03-21 04:09:06 +05302048 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2049 else
2050 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2051 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2052 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2053 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2054 /* make sure wqe is written before adapter can access it */
2055 wmb();
2056 /* inform hw to start processing it */
2057 ocrdma_ring_sq_db(qp);
2058
2059 /* update pointer, counter for next wr */
2060 ocrdma_hwq_inc_head(&qp->sq);
2061 wr = wr->next;
2062 }
2063 spin_unlock_irqrestore(&qp->q_lock, flags);
2064 return status;
2065}
2066
2067static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2068{
Naresh Gottumukkaladf176ea2013-06-10 04:42:41 +00002069 u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
Parav Panditfe2caef2012-03-21 04:09:06 +05302070
Naresh Gottumukkala45e86b32013-08-07 12:52:37 +05302071 if (qp->state != OCRDMA_QPS_INIT)
2072 iowrite32(val, qp->rq_db);
2073 else
2074 qp->db_cache++;
Parav Panditfe2caef2012-03-21 04:09:06 +05302075}
2076
2077static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2078 u16 tag)
2079{
2080 u32 wqe_size = 0;
2081 struct ocrdma_sge *sge;
2082 if (wr->num_sge)
2083 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2084 else
2085 wqe_size = sizeof(*sge) + sizeof(*rqe);
2086
2087 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2088 OCRDMA_WQE_SIZE_SHIFT);
2089 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2090 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2091 rqe->total_len = 0;
2092 rqe->rsvd_tag = tag;
2093 sge = (struct ocrdma_sge *)(rqe + 1);
2094 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2095 ocrdma_cpu_to_le32(rqe, wqe_size);
2096}
2097
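/* Post receive WRs on the QP's RQ: build an RQE at the head, record the
 * wr_id in the shadow table indexed by the head, and ring the RQ
 * doorbell after a write barrier.
 */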
2098int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2099 struct ib_recv_wr **bad_wr)
2100{
2101 int status = 0;
2102 unsigned long flags;
2103 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2104 struct ocrdma_hdr_wqe *rqe;
2105
2106 spin_lock_irqsave(&qp->q_lock, flags);
2107 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2108 spin_unlock_irqrestore(&qp->q_lock, flags);
2109 *bad_wr = wr;
2110 return -EINVAL;
2111 }
2112 while (wr) {
2113 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2114 wr->num_sge > qp->rq.max_sges) {
2115 *bad_wr = wr;
2116 status = -ENOMEM;
2117 break;
2118 }
2119 rqe = ocrdma_hwq_head(&qp->rq);
2120 ocrdma_build_rqe(rqe, wr, 0);
2121
2122 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2123 /* make sure rqe is written before adapter can access it */
2124 wmb();
2125
2126 /* inform hw to start processing it */
2127 ocrdma_ring_rq_db(qp);
2128
2129 /* update pointer, counter for next wr */
2130 ocrdma_hwq_inc_head(&qp->rq);
2131 wr = wr->next;
2132 }
2133 spin_unlock_irqrestore(&qp->q_lock, flags);
2134 return status;
2135}
2136
/* A cqe for an srq's rqe can arrive out of order. index gives the
 * entry in the shadow table where the wr_id is stored; the tag/index
 * is returned in the cqe to reference back to the given rqe.
 */
2142static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2143{
2144 int row = 0;
2145 int indx = 0;
2146
2147 for (row = 0; row < srq->bit_fields_len; row++) {
2148 if (srq->idx_bit_fields[row]) {
2149 indx = ffs(srq->idx_bit_fields[row]);
2150 indx = (row * 32) + (indx - 1);
2151 if (indx >= srq->rq.max_cnt)
2152 BUG();
2153 ocrdma_srq_toggle_bit(srq, indx);
2154 break;
2155 }
2156 }
2157
2158 if (row == srq->bit_fields_len)
2159 BUG();
2160 return indx;
2161}
2162
2163static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2164{
2165 u32 val = srq->rq.dbid | (1 << 16);
2166
2167 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2168}
2169
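/* Post receive WRs on an SRQ. Each RQE carries a free index taken from
 * the SRQ bitmap; the CQE returns this tag so the wr_id can be looked
 * up even when completions arrive out of order.
 */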
2170int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2171 struct ib_recv_wr **bad_wr)
2172{
2173 int status = 0;
2174 unsigned long flags;
2175 struct ocrdma_srq *srq;
2176 struct ocrdma_hdr_wqe *rqe;
2177 u16 tag;
2178
2179 srq = get_ocrdma_srq(ibsrq);
2180
2181 spin_lock_irqsave(&srq->q_lock, flags);
2182 while (wr) {
2183 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2184 wr->num_sge > srq->rq.max_sges) {
2185 status = -ENOMEM;
2186 *bad_wr = wr;
2187 break;
2188 }
2189 tag = ocrdma_srq_get_idx(srq);
2190 rqe = ocrdma_hwq_head(&srq->rq);
2191 ocrdma_build_rqe(rqe, wr, tag);
2192
2193 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2194 /* make sure rqe is written before adapter can perform DMA */
2195 wmb();
2196 /* inform hw to start processing it */
2197 ocrdma_ring_srq_db(srq);
2198 /* update pointer, counter for next wr */
2199 ocrdma_hwq_inc_head(&srq->rq);
2200 wr = wr->next;
2201 }
2202 spin_unlock_irqrestore(&srq->q_lock, flags);
2203 return status;
2204}
2205
2206static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2207{
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302208 enum ib_wc_status ibwc_status;
Parav Panditfe2caef2012-03-21 04:09:06 +05302209
2210 switch (status) {
2211 case OCRDMA_CQE_GENERAL_ERR:
2212 ibwc_status = IB_WC_GENERAL_ERR;
2213 break;
2214 case OCRDMA_CQE_LOC_LEN_ERR:
2215 ibwc_status = IB_WC_LOC_LEN_ERR;
2216 break;
2217 case OCRDMA_CQE_LOC_QP_OP_ERR:
2218 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2219 break;
2220 case OCRDMA_CQE_LOC_EEC_OP_ERR:
2221 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2222 break;
2223 case OCRDMA_CQE_LOC_PROT_ERR:
2224 ibwc_status = IB_WC_LOC_PROT_ERR;
2225 break;
2226 case OCRDMA_CQE_WR_FLUSH_ERR:
2227 ibwc_status = IB_WC_WR_FLUSH_ERR;
2228 break;
2229 case OCRDMA_CQE_MW_BIND_ERR:
2230 ibwc_status = IB_WC_MW_BIND_ERR;
2231 break;
2232 case OCRDMA_CQE_BAD_RESP_ERR:
2233 ibwc_status = IB_WC_BAD_RESP_ERR;
2234 break;
2235 case OCRDMA_CQE_LOC_ACCESS_ERR:
2236 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2237 break;
2238 case OCRDMA_CQE_REM_INV_REQ_ERR:
2239 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2240 break;
2241 case OCRDMA_CQE_REM_ACCESS_ERR:
2242 ibwc_status = IB_WC_REM_ACCESS_ERR;
2243 break;
2244 case OCRDMA_CQE_REM_OP_ERR:
2245 ibwc_status = IB_WC_REM_OP_ERR;
2246 break;
2247 case OCRDMA_CQE_RETRY_EXC_ERR:
2248 ibwc_status = IB_WC_RETRY_EXC_ERR;
2249 break;
2250 case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2251 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2252 break;
2253 case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2254 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2255 break;
2256 case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2257 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2258 break;
2259 case OCRDMA_CQE_REM_ABORT_ERR:
2260 ibwc_status = IB_WC_REM_ABORT_ERR;
2261 break;
2262 case OCRDMA_CQE_INV_EECN_ERR:
2263 ibwc_status = IB_WC_INV_EECN_ERR;
2264 break;
2265 case OCRDMA_CQE_INV_EEC_STATE_ERR:
2266 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2267 break;
2268 case OCRDMA_CQE_FATAL_ERR:
2269 ibwc_status = IB_WC_FATAL_ERR;
2270 break;
2271 case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2272 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2273 break;
2274 default:
2275 ibwc_status = IB_WC_GENERAL_ERR;
2276 break;
	}
2278 return ibwc_status;
2279}
2280
2281static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2282 u32 wqe_idx)
2283{
2284 struct ocrdma_hdr_wqe *hdr;
2285 struct ocrdma_sge *rw;
2286 int opcode;
2287
2288 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2289
2290 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2291 /* Undo the hdr->cw swap */
2292 opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2293 switch (opcode) {
2294 case OCRDMA_WRITE:
2295 ibwc->opcode = IB_WC_RDMA_WRITE;
2296 break;
2297 case OCRDMA_READ:
2298 rw = (struct ocrdma_sge *)(hdr + 1);
2299 ibwc->opcode = IB_WC_RDMA_READ;
2300 ibwc->byte_len = rw->len;
2301 break;
2302 case OCRDMA_SEND:
2303 ibwc->opcode = IB_WC_SEND;
2304 break;
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302305 case OCRDMA_FR_MR:
2306 ibwc->opcode = IB_WC_FAST_REG_MR;
2307 break;
Parav Panditfe2caef2012-03-21 04:09:06 +05302308 case OCRDMA_LKEY_INV:
2309 ibwc->opcode = IB_WC_LOCAL_INV;
2310 break;
2311 default:
2312 ibwc->status = IB_WC_GENERAL_ERR;
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002313 pr_err("%s() invalid opcode received = 0x%x\n",
2314 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
Parav Panditfe2caef2012-03-21 04:09:06 +05302315 break;
	}
2317}
2318
2319static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2320 struct ocrdma_cqe *cqe)
2321{
2322 if (is_cqe_for_sq(cqe)) {
2323 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2324 cqe->flags_status_srcqpn) &
2325 ~OCRDMA_CQE_STATUS_MASK);
2326 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2327 cqe->flags_status_srcqpn) |
2328 (OCRDMA_CQE_WR_FLUSH_ERR <<
2329 OCRDMA_CQE_STATUS_SHIFT));
2330 } else {
2331 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2332 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2333 cqe->flags_status_srcqpn) &
2334 ~OCRDMA_CQE_UD_STATUS_MASK);
2335 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2336 cqe->flags_status_srcqpn) |
2337 (OCRDMA_CQE_WR_FLUSH_ERR <<
2338 OCRDMA_CQE_UD_STATUS_SHIFT));
2339 } else {
2340 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2341 cqe->flags_status_srcqpn) &
2342 ~OCRDMA_CQE_STATUS_MASK);
2343 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2344 cqe->flags_status_srcqpn) |
2345 (OCRDMA_CQE_WR_FLUSH_ERR <<
2346 OCRDMA_CQE_STATUS_SHIFT));
2347 }
2348 }
2349}
2350
2351static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2352 struct ocrdma_qp *qp, int status)
2353{
2354 bool expand = false;
2355
2356 ibwc->byte_len = 0;
2357 ibwc->qp = &qp->ibqp;
2358 ibwc->status = ocrdma_to_ibwc_err(status);
2359
2360 ocrdma_flush_qp(qp);
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302361 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
Parav Panditfe2caef2012-03-21 04:09:06 +05302362
	/* if a wqe/rqe is pending for which a cqe still needs to be
	 * returned, trigger inflating it.
	 */
2366 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2367 expand = true;
2368 ocrdma_set_cqe_status_flushed(qp, cqe);
2369 }
2370 return expand;
2371}
2372
2373static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2374 struct ocrdma_qp *qp, int status)
2375{
2376 ibwc->opcode = IB_WC_RECV;
2377 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2378 ocrdma_hwq_inc_tail(&qp->rq);
2379
2380 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2381}
2382
2383static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2384 struct ocrdma_qp *qp, int status)
2385{
2386 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2387 ocrdma_hwq_inc_tail(&qp->sq);
2388
2389 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2390}
2391
2392
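/* Handle an error CQE for the send queue. If the SQ is already empty
 * but RQEs are still pending, either return flush completions for the
 * RQ (when SQ and RQ share a CQ) or keep the CQE so it can trigger an
 * event on the RQ's buddy CQ.
 */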
2393static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2394 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2395 bool *polled, bool *stop)
2396{
2397 bool expand;
2398 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2399 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2400
	/* when the hw sq is empty but the rq is not, keep the cqe
	 * in order to get the cq event again.
	 */
2404 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2405 /* when cq for rq and sq is same, it is safe to return
2406 * flush cqe for RQEs.
2407 */
2408 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2409 *polled = true;
2410 status = OCRDMA_CQE_WR_FLUSH_ERR;
2411 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2412 } else {
2413 /* stop processing further cqe as this cqe is used for
2414 * triggering cq event on buddy cq of RQ.
2415 * When QP is destroyed, this cqe will be removed
2416 * from the cq's hardware q.
2417 */
2418 *polled = false;
2419 *stop = true;
2420 expand = false;
2421 }
2422 } else {
2423 *polled = true;
2424 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2425 }
2426 return expand;
2427}
2428
2429static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2430 struct ocrdma_cqe *cqe,
2431 struct ib_wc *ibwc, bool *polled)
2432{
2433 bool expand = false;
2434 int tail = qp->sq.tail;
2435 u32 wqe_idx;
2436
2437 if (!qp->wqe_wr_id_tbl[tail].signaled) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302438 *polled = false; /* WC cannot be consumed yet */
2439 } else {
2440 ibwc->status = IB_WC_SUCCESS;
2441 ibwc->wc_flags = 0;
2442 ibwc->qp = &qp->ibqp;
2443 ocrdma_update_wc(qp, ibwc, tail);
2444 *polled = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302445 }
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302446 wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2447 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
Parav Panditae3bca92012-08-17 14:45:33 +00002448 if (tail != wqe_idx)
2449 expand = true; /* Coalesced CQE can't be consumed yet */
2450
Parav Panditfe2caef2012-03-21 04:09:06 +05302451 ocrdma_hwq_inc_tail(&qp->sq);
2452 return expand;
2453}
2454
2455static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2456 struct ib_wc *ibwc, bool *polled, bool *stop)
2457{
2458 int status;
2459 bool expand;
2460
2461 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2462 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2463
2464 if (status == OCRDMA_CQE_SUCCESS)
2465 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2466 else
2467 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2468 return expand;
2469}
2470
2471static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2472{
2473 int status;
2474
2475 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2476 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2477 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2478 OCRDMA_CQE_SRCQP_MASK;
2479 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2480 OCRDMA_CQE_PKEY_MASK;
2481 ibwc->wc_flags = IB_WC_GRH;
2482 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2483 OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2484 return status;
2485}
2486
2487static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2488 struct ocrdma_cqe *cqe,
2489 struct ocrdma_qp *qp)
2490{
2491 unsigned long flags;
2492 struct ocrdma_srq *srq;
2493 u32 wqe_idx;
2494
2495 srq = get_ocrdma_srq(qp->ibqp.srq);
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302496 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2497 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
Parav Panditfe2caef2012-03-21 04:09:06 +05302498 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2499 spin_lock_irqsave(&srq->q_lock, flags);
2500 ocrdma_srq_toggle_bit(srq, wqe_idx);
2501 spin_unlock_irqrestore(&srq->q_lock, flags);
2502 ocrdma_hwq_inc_tail(&srq->rq);
2503}
2504
2505static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2506 struct ib_wc *ibwc, bool *polled, bool *stop,
2507 int status)
2508{
2509 bool expand;
2510
	/* when the hw rq is empty but the wq is not, keep the cqe
	 * to get the cq event again.
	 */
2514 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2515 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2516 *polled = true;
2517 status = OCRDMA_CQE_WR_FLUSH_ERR;
2518 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2519 } else {
2520 *polled = false;
2521 *stop = true;
2522 expand = false;
2523 }
Parav Pandita3698a92012-06-11 16:39:20 +05302524 } else {
2525 *polled = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302526 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
Parav Pandita3698a92012-06-11 16:39:20 +05302527 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302528 return expand;
2529}
2530
2531static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2532 struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2533{
2534 ibwc->opcode = IB_WC_RECV;
2535 ibwc->qp = &qp->ibqp;
2536 ibwc->status = IB_WC_SUCCESS;
2537
2538 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2539 ocrdma_update_ud_rcqe(ibwc, cqe);
2540 else
2541 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2542
2543 if (is_cqe_imm(cqe)) {
2544 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2545 ibwc->wc_flags |= IB_WC_WITH_IMM;
2546 } else if (is_cqe_wr_imm(cqe)) {
2547 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2548 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2549 ibwc->wc_flags |= IB_WC_WITH_IMM;
2550 } else if (is_cqe_invalidated(cqe)) {
2551 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2552 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2553 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302554 if (qp->ibqp.srq) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302555 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302556 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302557 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2558 ocrdma_hwq_inc_tail(&qp->rq);
2559 }
2560}
2561
2562static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2563 struct ib_wc *ibwc, bool *polled, bool *stop)
2564{
2565 int status;
2566 bool expand = false;
2567
2568 ibwc->wc_flags = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302569 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302570 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2571 OCRDMA_CQE_UD_STATUS_MASK) >>
2572 OCRDMA_CQE_UD_STATUS_SHIFT;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302573 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302574 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2575 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302576 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302577
2578 if (status == OCRDMA_CQE_SUCCESS) {
2579 *polled = true;
2580 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2581 } else {
2582 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2583 status);
2584 }
2585 return expand;
2586}
2587
2588static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2589 u16 cur_getp)
2590{
2591 if (cq->phase_change) {
2592 if (cur_getp == 0)
2593 cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302594 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302595 /* clear valid bit */
2596 cqe->flags_status_srcqpn = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302597 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302598}
2599
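/* Walk the hardware CQ from the consumer index, translating up to
 * num_entries valid CQEs into work completions. Discarded CQEs
 * (qpn == 0) are skipped, flush/coalesced CQEs may expand into more
 * than one completion, and the CQ doorbell is rung with the number of
 * hardware CQEs consumed.
 */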
2600static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2601 struct ib_wc *ibwc)
2602{
2603 u16 qpn = 0;
2604 int i = 0;
2605 bool expand = false;
2606 int polled_hw_cqes = 0;
2607 struct ocrdma_qp *qp = NULL;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302608 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302609 struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;
2611
2612 cur_getp = cq->getp;
2613 while (num_entries) {
2614 cqe = cq->va + cur_getp;
2615 /* check whether valid cqe or not */
2616 if (!is_cqe_valid(cq, cqe))
2617 break;
2618 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2619 /* ignore discarded cqe */
2620 if (qpn == 0)
2621 goto skip_cqe;
2622 qp = dev->qp_tbl[qpn];
2623 BUG_ON(qp == NULL);
2624
2625 if (is_cqe_for_sq(cqe)) {
2626 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2627 &stop);
2628 } else {
2629 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2630 &stop);
2631 }
2632 if (expand)
2633 goto expand_cqe;
2634 if (stop)
2635 goto stop_cqe;
2636 /* clear qpn to avoid duplicate processing by discard_cqe() */
2637 cqe->cmn.qpn = 0;
2638skip_cqe:
2639 polled_hw_cqes += 1;
2640 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2641 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2642expand_cqe:
2643 if (polled) {
2644 num_entries -= 1;
2645 i += 1;
2646 ibwc = ibwc + 1;
2647 polled = false;
2648 }
2649 }
2650stop_cqe:
2651 cq->getp = cur_getp;
2652 if (polled_hw_cqes || expand || stop) {
2653 ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
2654 polled_hw_cqes);
2655 }
2656 return i;
2657}
2658
2659/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2660static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2661 struct ocrdma_qp *qp, struct ib_wc *ibwc)
2662{
2663 int err_cqes = 0;
2664
2665 while (num_entries) {
2666 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2667 break;
2668 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2669 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2670 ocrdma_hwq_inc_tail(&qp->sq);
2671 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2672 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2673 ocrdma_hwq_inc_tail(&qp->rq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302674 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302675 return err_cqes;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302676 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302677 ibwc->byte_len = 0;
2678 ibwc->status = IB_WC_WR_FLUSH_ERR;
2679 ibwc = ibwc + 1;
2680 err_cqes += 1;
2681 num_entries -= 1;
2682 }
2683 return err_cqes;
2684}
2685
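/* Poll work completions: first reap real CQEs from the hardware CQ,
 * then, if room remains, add flush-error completions for the pending
 * WQEs/RQEs of QPs on this CQ's flush lists, since the adapter reports
 * only a single error CQE when a QP enters the error state.
 */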
2686int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2687{
2688 int cqes_to_poll = num_entries;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302689 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2690 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302691 int num_os_cqe = 0, err_cqes = 0;
2692 struct ocrdma_qp *qp;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302693 unsigned long flags;
Parav Panditfe2caef2012-03-21 04:09:06 +05302694
2695 /* poll cqes from adapter CQ */
2696 spin_lock_irqsave(&cq->cq_lock, flags);
2697 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2698 spin_unlock_irqrestore(&cq->cq_lock, flags);
2699 cqes_to_poll -= num_os_cqe;
2700
2701 if (cqes_to_poll) {
		/* the adapter returns a single error cqe when a qp moves to
		 * the error state, so insert error cqes with wc_status
		 * FLUSHED for the pending WQEs and RQEs of the QPs that
		 * use this CQ.
		 */
2707 */
2708 spin_lock_irqsave(&dev->flush_q_lock, flags);
2709 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2710 if (cqes_to_poll == 0)
2711 break;
2712 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2713 cqes_to_poll -= err_cqes;
2714 num_os_cqe += err_cqes;
2715 wc = wc + err_cqes;
2716 }
2717 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2718 }
2719 return num_os_cqe;
2720}
2721
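/* Request a completion notification. The CQ is marked armed (and
 * solicited if requested); the doorbell is written only when no
 * unconsumed valid CQE is pending or a deferred arm was requested, to
 * avoid false interrupts.
 */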
2722int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2723{
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302724 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2725 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302726 u16 cq_id;
2727 u16 cur_getp;
2728 struct ocrdma_cqe *cqe;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302729 unsigned long flags;
Parav Panditfe2caef2012-03-21 04:09:06 +05302730
Parav Panditfe2caef2012-03-21 04:09:06 +05302731 cq_id = cq->id;
Parav Panditfe2caef2012-03-21 04:09:06 +05302732
2733 spin_lock_irqsave(&cq->cq_lock, flags);
2734 if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
2735 cq->armed = true;
2736 if (cq_flags & IB_CQ_SOLICITED)
2737 cq->solicited = true;
2738
2739 cur_getp = cq->getp;
2740 cqe = cq->va + cur_getp;
2741
	/* check whether any valid cqe exists; if not, it is safe to arm.
	 * If a cqe is not yet consumed, let it be consumed first and arm
	 * afterwards, to avoid false interrupts.
	 */
2746 if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
2747 cq->arm_needed = false;
2748 ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
2749 }
2750 spin_unlock_irqrestore(&cq->cq_lock, flags);
2751 return 0;
2752}
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302753
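/* Allocate a fast-register MR: reserve PBL space for up to
 * max_page_list_len pages, register the MR with the adapter and record
 * it in the stag array so ocrdma_build_fr() can look it up by lkey.
 */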
2754struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
2755{
2756 int status;
2757 struct ocrdma_mr *mr;
2758 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2759 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2760
2761 if (max_page_list_len > dev->attr.max_pages_per_frmr)
2762 return ERR_PTR(-EINVAL);
2763
2764 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2765 if (!mr)
2766 return ERR_PTR(-ENOMEM);
2767
2768 status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
2769 if (status)
2770 goto pbl_err;
2771 mr->hwmr.fr_mr = 1;
2772 mr->hwmr.remote_rd = 0;
2773 mr->hwmr.remote_wr = 0;
2774 mr->hwmr.local_rd = 0;
2775 mr->hwmr.local_wr = 0;
2776 mr->hwmr.mw_bind = 0;
2777 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
2778 if (status)
2779 goto pbl_err;
2780 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
2781 if (status)
2782 goto mbx_err;
2783 mr->ibmr.rkey = mr->hwmr.lkey;
2784 mr->ibmr.lkey = mr->hwmr.lkey;
2785 dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (u64) mr;
2786 return &mr->ibmr;
2787mbx_err:
2788 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
2789pbl_err:
2790 kfree(mr);
2791 return ERR_PTR(-ENOMEM);
2792}
2793
2794struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
2795 *ibdev,
2796 int page_list_len)
2797{
2798 struct ib_fast_reg_page_list *frmr_list;
2799 int size;
2800
2801 size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
2802 frmr_list = kzalloc(size, GFP_KERNEL);
2803 if (!frmr_list)
2804 return ERR_PTR(-ENOMEM);
2805 frmr_list->page_list = (u64 *)(frmr_list + 1);
2806 return frmr_list;
2807}
2808
2809void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
2810{
2811 kfree(page_list);
2812}
Naresh Gottumukkalacffce992013-08-26 15:27:44 +05302813
2814#define MAX_KERNEL_PBE_SIZE 65536
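/* Choose the smallest power-of-two PBE size (capped at
 * MAX_KERNEL_PBE_SIZE) that can map the buffer list and return the
 * number of PBEs required; returns 0 if any buffer after the first is
 * not page aligned.
 */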
2815static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
2816 int buf_cnt, u32 *pbe_size)
2817{
2818 u64 total_size = 0;
2819 u64 buf_size = 0;
2820 int i;
2821 *pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
2822 *pbe_size = roundup_pow_of_two(*pbe_size);
2823
2824 /* find the smallest PBE size that we can have */
2825 for (i = 0; i < buf_cnt; i++) {
2826 /* first addr may not be page aligned, so ignore checking */
		/* the first addr may not be page aligned, so skip the check for it */
2828 (buf_list[i].size & ~PAGE_MASK))) {
2829 return 0;
2830 }
2831
		/* if the configured PBE size is greater than the chosen one,
2833 * reduce the PBE size.
2834 */
2835 buf_size = roundup(buf_list[i].size, PAGE_SIZE);
		/* pbe_size has to be 4K times a power of two: 1, 2, 4, 8, ... */
2837 buf_size = roundup_pow_of_two(buf_size);
2838 if (*pbe_size > buf_size)
2839 *pbe_size = buf_size;
2840
2841 total_size += buf_size;
2842 }
2843 *pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
2844 (MAX_KERNEL_PBE_SIZE) : (*pbe_size);
2845
2846 /* num_pbes = total_size / (*pbe_size); this is implemented below. */
2847
2848 return total_size >> ilog2(*pbe_size);
2849}
2850
2851static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
2852 u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
2853 struct ocrdma_hw_mr *hwmr)
2854{
2855 int i;
2856 int idx;
2857 int pbes_per_buf = 0;
2858 u64 buf_addr = 0;
2859 int num_pbes;
2860 struct ocrdma_pbe *pbe;
2861 int total_num_pbes = 0;
2862
2863 if (!hwmr->num_pbes)
2864 return;
2865
2866 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2867 num_pbes = 0;
2868
2869 /* go through the OS phy regions & fill hw pbe entries into pbls. */
2870 for (i = 0; i < ib_buf_cnt; i++) {
2871 buf_addr = buf_list[i].addr;
2872 pbes_per_buf =
2873 roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
2874 pbe_size;
2875 hwmr->len += buf_list[i].size;
		/* one OS buf may need more than one pbe when the
		 * buffers are of different sizes; split the ib_buf
		 * into one or more pbes.
		 */
2880 for (idx = 0; idx < pbes_per_buf; idx++) {
			/* we always program page aligned addresses; any
			 * offset into the first page is covered by the fbo.
			 */
2884 if (i == 0) {
				/* for a non-zero fbo, assign the
				 * start of the page.
				 */
2888 pbe->pa_lo =
2889 cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2890 pbe->pa_hi =
2891 cpu_to_le32((u32) upper_32_bits(buf_addr));
2892 } else {
2893 pbe->pa_lo =
2894 cpu_to_le32((u32) (buf_addr & 0xffffffff));
2895 pbe->pa_hi =
2896 cpu_to_le32((u32) upper_32_bits(buf_addr));
2897 }
2898 buf_addr += pbe_size;
2899 num_pbes += 1;
2900 total_num_pbes += 1;
2901 pbe++;
2902
2903 if (total_num_pbes == hwmr->num_pbes)
2904 goto mr_tbl_done;
2905 /* if the pbl is full storing the pbes,
2906 * move to next pbl.
2907 */
2908 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
2909 pbl_tbl++;
2910 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2911 num_pbes = 0;
2912 }
2913 }
2914 }
2915mr_tbl_done:
2916 return;
2917}
2918
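/* Register a kernel physical MR: pick a PBE size that covers the buffer
 * list, build the PBL table and register the region with the requested
 * access flags.
 */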
2919struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
2920 struct ib_phys_buf *buf_list,
2921 int buf_cnt, int acc, u64 *iova_start)
2922{
2923 int status = -ENOMEM;
2924 struct ocrdma_mr *mr;
2925 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2926 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2927 u32 num_pbes;
2928 u32 pbe_size = 0;
2929
2930 if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
2931 return ERR_PTR(-EINVAL);
2932
2933 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2934 if (!mr)
2935 return ERR_PTR(status);
2936
2937 num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
2938 if (num_pbes == 0) {
2939 status = -EINVAL;
2940 goto pbl_err;
2941 }
2942 status = ocrdma_get_pbl_info(dev, mr, num_pbes);
2943 if (status)
2944 goto pbl_err;
2945
2946 mr->hwmr.pbe_size = pbe_size;
2947 mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
2948 mr->hwmr.va = *iova_start;
2949 mr->hwmr.local_rd = 1;
2950 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2951 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2952 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2953 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2954 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
2955
2956 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
2957 if (status)
2958 goto pbl_err;
2959 build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
2960 &mr->hwmr);
2961 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
2962 if (status)
2963 goto mbx_err;
2964
2965 mr->ibmr.lkey = mr->hwmr.lkey;
2966 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
2967 mr->ibmr.rkey = mr->hwmr.lkey;
2968 return &mr->ibmr;
2969
2970mbx_err:
2971 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
2972pbl_err:
2973 kfree(mr);
2974 return ERR_PTR(status);
2975}