/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"

int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
		     int index, union ib_gid *sgid)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	memset(sgid, 0, sizeof(*sgid));
	if (index >= OCRDMA_MAX_SGID)
		return -EINVAL;

	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

	return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	memset(attr, 0, sizeof *attr);
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = dev->attr.max_mr_size;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = dev->asic_id;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = OCRDMA_MAX_AH;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
					IB_DEVICE_RC_RNR_NAK_GEN |
					IB_DEVICE_SHUTDOWN_PORT |
					IB_DEVICE_SYS_IMAGE_GUID |
					IB_DEVICE_LOCAL_DMA_LKEY |
					IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
	attr->max_sge_rd = 0;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = dev->attr.max_mw;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
	attr->max_pkeys = 1;
	return 0;
}

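/* Map the PHY link speed reported by firmware to IB speed/width values. */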
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
					    u8 *ib_speed, u8 *ib_width)
{
	int status;
	u8 speed;

	status = ocrdma_mbx_get_link_speed(dev, &speed);
	if (status)
		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

	switch (speed) {
	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
		       dev->id, port);
		return -EINVAL;
	}
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP |
	    IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	get_link_speed_and_width(dev, &props->active_speed,
				 &props->active_width);
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
		return -EINVAL;
	}
	return 0;
}

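/*
 * Doorbell/DPP/queue pages handed out to a user context are tracked in a
 * per-ucontext list so that ocrdma_mmap() can validate mmap requests.
 */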
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len && phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

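/*
 * Pre-allocated PD pool management: PDs come from either a DPP pool or a
 * normal pool, with allocations tracked in per-pool bitmaps.
 */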
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
	u16 pd_bitmap_idx = 0;
	const unsigned long *pd_bitmap;

	if (dpp_pool) {
		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_dpp_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
		dev->pd_mgr->pd_dpp_count++;
		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
	} else {
		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_normal_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
		dev->pd_mgr->pd_norm_count++;
		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
	}
	return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
				     bool dpp_pool)
{
	u16 pd_count;
	u16 pd_bit_index;

	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
			      dev->pd_mgr->pd_norm_count;
	if (pd_count == 0)
		return -EINVAL;

	if (dpp_pool) {
		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
			dev->pd_mgr->pd_dpp_count--;
		}
	} else {
		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
			dev->pd_mgr->pd_norm_count--;
		}
	}

	return 0;
}

static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
			    bool dpp_pool)
{
	int status;

	mutex_lock(&dev->dev_lock);
	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
	mutex_unlock(&dev->dev_lock);
	return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	u16 pd_idx = 0;
	int status = 0;

	mutex_lock(&dev->dev_lock);
	if (pd->dpp_enabled) {
		/* try allocating DPP PD, if not available then normal PD */
		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
		} else if (dev->pd_mgr->pd_norm_count <
			   dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
			pd->dpp_enabled = false;
		} else {
			status = -EINVAL;
		}
	} else {
		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
		} else {
			status = -EINVAL;
		}
	}
	mutex_unlock(&dev->dev_lock);
	return status;
}

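/*
 * Allocate a PD, taking it from the pre-allocated pool when firmware supports
 * that; otherwise fall back to a mailbox allocation, retrying without DPP if
 * the DPP allocation fails.
 */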
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
					  struct ocrdma_ucontext *uctx,
					  struct ib_udata *udata)
{
	struct ocrdma_pd *pd = NULL;
	int status = 0;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	if (udata && uctx) {
		pd->dpp_enabled =
			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
		pd->num_dpp_qp =
			pd->dpp_enabled ? (dev->nic_info.db_page_size /
					   dev->attr.wqe_size) : 0;
	}

	if (dev->pd_mgr->pd_prealloc_valid) {
		status = ocrdma_get_pd_num(dev, pd);
		return (status == 0) ? pd : ERR_PTR(status);
	}

retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		} else {
			kfree(pd);
			return ERR_PTR(status);
		}
	}

	return pd;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
				 struct ocrdma_pd *pd)
{
	return (uctx->cntxt_pd == pd ? true : false);
}

static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
			      struct ocrdma_pd *pd)
{
	int status = 0;

	if (dev->pd_mgr->pd_prealloc_valid)
		status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
	else
		status = ocrdma_mbx_dealloc_pd(dev, pd);

	kfree(pd);
	return status;
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
				    struct ocrdma_ucontext *uctx,
				    struct ib_udata *udata)
{
	int status = 0;

	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(uctx->cntxt_pd)) {
		status = PTR_ERR(uctx->cntxt_pd);
		uctx->cntxt_pd = NULL;
		goto err;
	}

	uctx->cntxt_pd->uctx = uctx;
	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
	return status;
}

static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	int status = 0;
	struct ocrdma_pd *pd = uctx->cntxt_pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	if (uctx->pd_in_use) {
		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
		       __func__, dev->id, pd->id);
	}
	uctx->cntxt_pd = NULL;
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = NULL;

	mutex_lock(&uctx->mm_list_lock);
	if (!uctx->pd_in_use) {
		uctx->pd_in_use = true;
		pd = uctx->cntxt_pd;
	}
	mutex_unlock(&uctx->mm_list_lock);

	return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	mutex_lock(&uctx->mm_list_lock);
	uctx->pd_in_use = false;
	mutex_unlock(&uctx->mm_list_lock);
}

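/*
 * Allocate a user context: create a DMA-coherent AH table, register it in the
 * context's mmap list, set up the per-context PD, and copy the device
 * parameters back to userspace.
 */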
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return ERR_PTR(-EFAULT);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	memset(ctx->ah_tbl.va, 0, map_len);
	ctx->ah_tbl.len = map_len;

	memset(&resp, 0, sizeof(resp));
	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;

	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
	if (status)
		goto pd_err;

	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return &ctx->ibucontext;

cpy_err:
pd_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	kfree(ctx);
	return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	int status = 0;
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	status = ocrdma_dealloc_ucontext_pd(uctx);

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return status;
}

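/*
 * Map a doorbell, DPP, or queue page into userspace. Only page-aligned
 * requests whose address was previously registered in the ucontext mmap
 * list are allowed.
 */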
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status = 0;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
	    (len <= dev->nic_info.db_page_size)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
			dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len,
					 vma->vm_page_prot);
	}
	return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	if (pd->dpp_enabled)
		ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
	return status;
}

struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	struct ocrdma_ucontext *uctx = NULL;
	int status;
	u8 is_uctx_pd = false;

	if (udata && context) {
		uctx = get_ocrdma_ucontext(context);
		pd = ocrdma_get_ucontext_pd(uctx);
		if (pd) {
			is_uctx_pd = true;
			goto pd_mapping;
		}
	}

	pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(pd)) {
		status = PTR_ERR(pd);
		goto exit;
	}

pd_mapping:
	if (udata && context) {
		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	if (is_uctx_pd) {
		ocrdma_release_ucontext_pd(uctx);
	} else {
		/* _ocrdma_dealloc_pd() frees the pd itself. */
		status = _ocrdma_dealloc_pd(dev, pd);
	}
exit:
	return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_ucontext *uctx = NULL;
	int status = 0;
	u64 usr_db;

	uctx = pd->uctx;
	if (uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = ocrdma_get_db_addr(dev, pd->id);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

		if (is_ucontext_pd(uctx, pd)) {
			ocrdma_release_ucontext_pd(uctx);
			return status;
		}
	}
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}

static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}

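/*
 * Pick the smallest PBL (page buffer list) size that can describe the
 * requested number of PBEs within the device limit on PBLs per MR.
 */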
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
				mr->num_pbls, GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		memset(va, 0, dma_len);
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

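/*
 * Walk the umem scatterlist and fill the PBEs (page buffer entries) with the
 * DMA address of every page, moving to the next PBL when one fills up.
 */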
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct scatterlist *sg;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->pa_lo =
			    cpu_to_le32(sg_dma_address(sg) +
					(umem->page_size * pg_cnt));
			pbe->pa_hi =
			    cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
						      umem->page_size * pg_cnt));
			pbe_cnt += 1;
			total_num_pbes += 1;
			pbe++;

			/* if done building pbes, issue the mbx cmd. */
			if (total_num_pbes == num_pbes)
				return;

			/* if the given pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (pbe_cnt ==
				(mr->hwmr.pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				pbe_cnt = 0;
			}

		}
	}
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = mr->umem->page_size;
	mr->hwmr.fbo = ib_umem_offset(mr->umem);
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
	int status;

	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	/* Don't stop cleanup, in case FW is unresponsive */
	if (dev->mqe_ctx.fw_error_state) {
		status = 0;
		pr_err("%s(%d) fw not responding.\n",
		       __func__, dev->id);
	}
	return status;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
	struct ocrdma_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = virt_to_phys(cq->va);
	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_ucontext *uctx = NULL;
	u16 pd_id = 0;
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else
		ureq.dpp_cq = 0;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);
	cq->first_arm = true;

	if (ib_ctx) {
		uctx = get_ocrdma_ucontext(ib_ctx);
		pd_id = uctx->cntxt_pd->id;
	}

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	dev->cq_tbl[cq->id] = cq;
	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}

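/*
 * On CQ teardown, count the CQEs still marked valid and ring the CQ doorbell
 * so the hardware credits are returned before the CQ is destroyed.
 */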
static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
	int cqe_cnt;
	int valid_count = 0;
	unsigned long flags;

	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe = NULL;

	cqe = cq->va;
	cqe_cnt = cq->cqe_cnt;

	/* Last irq might have scheduled a polling thread
	 * sync-up with it before hard flushing.
	 */
	spin_lock_irqsave(&cq->cq_lock, flags);
	while (cqe_cnt) {
		if (is_cqe_valid(cq, cqe))
			valid_count++;
		cqe++;
		cqe_cnt--;
	}
	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	int status;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_eq *eq = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int pdid = 0;
	u32 irq, indx;

	dev->cq_tbl[cq->id] = NULL;
	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
	if (indx == -EINVAL)
		BUG();

	eq = &dev->eq_tbl[indx];
	irq = ocrdma_get_irq(dev, eq);
	synchronize_irq(irq);
	ocrdma_flush_cq(cq);

	status = ocrdma_mbx_destroy_cq(dev, cq);
	if (cq->ucontext) {
		pdid = cq->ucontext->cntxt_pd->id;
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext,
				ocrdma_get_db_addr(dev, pdid),
				dev->nic_info.db_page_size);
	}

	kfree(cq);
	return status;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

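/*
 * Validate ib_qp_init_attr against device limits before creating a QP. Only
 * RC, UC, UD and GSI QPs are supported, and the GSI QP is special-cased.
 */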
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		pr_err
		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		     __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

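/*
 * Copy the created QP's parameters (queue ids, doorbell page, DPP credits)
 * back to userspace and register the SQ/RQ pages in the context's mmap list.
 */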
static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status = 0;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_RQ_OFFSET;
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	qp->dev = dev;
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
					OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
					ureq.dpp_cq_id,
					&dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user space QP's wr_id table are managed in library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if new and previous states are same hw doesn't need to
	 * know about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);

	return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	/* synchronize with multiple contexts trying to change/retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

1478static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1479{
1480 switch (mtu) {
1481 case 256:
1482 return IB_MTU_256;
1483 case 512:
1484 return IB_MTU_512;
1485 case 1024:
1486 return IB_MTU_1024;
1487 case 2048:
1488 return IB_MTU_2048;
1489 case 4096:
1490 return IB_MTU_4096;
1491 default:
1492 return IB_MTU_1024;
1493 }
1494}
1495
1496static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1497{
1498 int ib_qp_acc_flags = 0;
1499
1500 if (qp_cap_flags & OCRDMA_QP_INB_WR)
1501 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1502 if (qp_cap_flags & OCRDMA_QP_INB_RD)
1503 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1504 return ib_qp_acc_flags;
1505}
1506
1507int ocrdma_query_qp(struct ib_qp *ibqp,
1508 struct ib_qp_attr *qp_attr,
1509 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1510{
1511 int status;
1512 u32 qp_state;
1513 struct ocrdma_qp_params params;
1514 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1515 struct ocrdma_dev *dev = qp->dev;
1516
1517 memset(&params, 0, sizeof(params));
1518 mutex_lock(&dev->dev_lock);
1519 status = ocrdma_mbx_query_qp(dev, qp, &params);
1520 mutex_unlock(&dev->dev_lock);
1521 if (status)
1522 goto mbx_err;
Mitesh Ahuja95bf0092014-12-03 11:36:33 +05301523 if (qp->qp_type == IB_QPT_UD)
1524 qp_attr->qkey = params.qkey;
Parav Panditfe2caef2012-03-21 04:09:06 +05301525 qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
1526 qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
1527 qp_attr->path_mtu =
1528 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1529 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1530 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1531 qp_attr->path_mig_state = IB_MIG_MIGRATED;
1532 qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1533 qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1534 qp_attr->dest_qp_num =
1535 params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1536
1537 qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1538 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1539 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1540 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1541 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Naresh Gottumukkalac43e9ab2013-08-26 15:27:46 +05301542 qp_attr->cap.max_inline_data = qp->max_inline_data;
Parav Panditfe2caef2012-03-21 04:09:06 +05301543 qp_init_attr->cap = qp_attr->cap;
1544 memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1545 sizeof(params.dgid));
1546 qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1547 OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1548 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1549 qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1550 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1551 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1552 qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
Devesh Sharmaa61d93d2014-02-10 13:48:58 +05301553 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
Parav Panditfe2caef2012-03-21 04:09:06 +05301554 OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1555
1556 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1557 qp_attr->ah_attr.port_num = 1;
1558 qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1559 OCRDMA_QP_PARAMS_SL_MASK) >>
1560 OCRDMA_QP_PARAMS_SL_SHIFT;
1561 qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1562 OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1563 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1564 qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1565 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1566 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1567 qp_attr->retry_cnt =
1568 (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1569 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1570 qp_attr->min_rnr_timer = 0;
1571 qp_attr->pkey_index = 0;
1572 qp_attr->port_num = 1;
1573 qp_attr->ah_attr.src_path_bits = 0;
1574 qp_attr->ah_attr.static_rate = 0;
1575 qp_attr->alt_pkey_index = 0;
1576 qp_attr->alt_port_num = 0;
1577 qp_attr->alt_timeout = 0;
1578 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1579 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1580 OCRDMA_QP_PARAMS_STATE_SHIFT;
1581 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1582 qp_attr->max_dest_rd_atomic =
1583 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1584 qp_attr->max_rd_atomic =
1585 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1586 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1587 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1588mbx_err:
1589 return status;
1590}
1591
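/* flip the bit tracking whether SRQ shadow-table entry 'idx' is free
 * (bit set) or in use (bit clear).
 */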
1592static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
1593{
1594 int i = idx / 32;
1595 unsigned int mask = (1 << (idx % 32));
1596
1597 if (srq->idx_bit_fields[i] & mask)
1598 srq->idx_bit_fields[i] &= ~mask;
1599 else
1600 srq->idx_bit_fields[i] |= mask;
1601}
1602
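/* number of free entries available in the hardware work queue ring */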
1603static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1604{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301605 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
Parav Panditfe2caef2012-03-21 04:09:06 +05301606}
1607
1608static int is_hw_sq_empty(struct ocrdma_qp *qp)
1609{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301610 return (qp->sq.tail == qp->sq.head);
Parav Panditfe2caef2012-03-21 04:09:06 +05301611}
1612
1613static int is_hw_rq_empty(struct ocrdma_qp *qp)
1614{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301615 return (qp->rq.tail == qp->rq.head);
Parav Panditfe2caef2012-03-21 04:09:06 +05301616}
1617
1618static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1619{
1620 return q->va + (q->head * q->entry_size);
1621}
1622
1623static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1624 u32 idx)
1625{
1626 return q->va + (idx * q->entry_size);
1627}
1628
1629static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1630{
1631 q->head = (q->head + 1) & q->max_wqe_idx;
1632}
1633
1634static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1635{
1636 q->tail = (q->tail + 1) & q->max_wqe_idx;
1637}
1638
1639/* discard the cqes for a given QP */
1640static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1641{
1642 unsigned long cq_flags;
1643 unsigned long flags;
1644 int discard_cnt = 0;
1645 u32 cur_getp, stop_getp;
1646 struct ocrdma_cqe *cqe;
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301647 u32 qpn = 0, wqe_idx = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05301648
1649 spin_lock_irqsave(&cq->cq_lock, cq_flags);
1650
1651	/* traverse the CQEs in the hw CQ,
1652	 * find the CQEs matching the given qp,
1653	 * mark each matching one discarded by clearing its qpn.
1654	 * the doorbell is rung in poll_cq() since
1655	 * we don't complete cqes out of order.
1656	 */
1657
1658 cur_getp = cq->getp;
1659	/* find up to where we reap the cq. */
1660 stop_getp = cur_getp;
1661 do {
1662 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1663 break;
1664
1665 cqe = cq->va + cur_getp;
1666		/* exit when (a) we are done reaping the whole hw cq, or
1667		 * (b) the qp's sq/rq becomes empty,
1668		 * whichever comes first.
1669		 */
1670 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1671 /* if previously discarded cqe found, skip that too. */
1672 /* check for matching qp */
1673 if (qpn == 0 || qpn != qp->id)
1674 goto skip_cqe;
1675
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301676 if (is_cqe_for_sq(cqe)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301677 ocrdma_hwq_inc_tail(&qp->sq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301678 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301679 if (qp->srq) {
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301680 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1681 OCRDMA_CQE_BUFTAG_SHIFT) &
1682 qp->srq->rq.max_wqe_idx;
1683 if (wqe_idx < 1)
1684 BUG();
Parav Panditfe2caef2012-03-21 04:09:06 +05301685 spin_lock_irqsave(&qp->srq->q_lock, flags);
1686 ocrdma_hwq_inc_tail(&qp->srq->rq);
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301687 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05301688 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1689
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301690 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301691 ocrdma_hwq_inc_tail(&qp->rq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301692 }
Parav Panditfe2caef2012-03-21 04:09:06 +05301693 }
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301694 /* mark cqe discarded so that it is not picked up later
1695 * in the poll_cq().
1696 */
1697 discard_cnt += 1;
1698 cqe->cmn.qpn = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05301699skip_cqe:
1700 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1701 } while (cur_getp != stop_getp);
1702 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1703}
1704
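/* remove the QP from its send and receive CQ flush lists;
 * flush_q_lock synchronizes this with any active CQ poll.
 */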
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05301705void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
Parav Panditfe2caef2012-03-21 04:09:06 +05301706{
1707 int found = false;
1708 unsigned long flags;
1709 struct ocrdma_dev *dev = qp->dev;
1710 /* sync with any active CQ poll */
1711
1712 spin_lock_irqsave(&dev->flush_q_lock, flags);
1713 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1714 if (found)
1715 list_del(&qp->sq_entry);
1716 if (!qp->srq) {
1717 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1718 if (found)
1719 list_del(&qp->rq_entry);
1720 }
1721 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1722}
1723
1724int ocrdma_destroy_qp(struct ib_qp *ibqp)
1725{
1726 int status;
1727 struct ocrdma_pd *pd;
1728 struct ocrdma_qp *qp;
1729 struct ocrdma_dev *dev;
1730 struct ib_qp_attr attrs;
1731 int attr_mask = IB_QP_STATE;
Dan Carpenterd19081e2012-05-02 09:14:47 +03001732 unsigned long flags;
Parav Panditfe2caef2012-03-21 04:09:06 +05301733
1734 qp = get_ocrdma_qp(ibqp);
1735 dev = qp->dev;
1736
1737 attrs.qp_state = IB_QPS_ERR;
1738 pd = qp->pd;
1739
1740 /* change the QP state to ERROR */
1741 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1742
1743	/* ensure that CQEs for a newly created QP (whose id may be the
1744	 * same as that of the QP just being destroyed) don't get
1745	 * discarded until the old CQEs are discarded.
1746	 */
1747 mutex_lock(&dev->dev_lock);
1748 status = ocrdma_mbx_destroy_qp(dev, qp);
1749
1750 /*
1751 * acquire CQ lock while destroy is in progress, in order to
1752	 * protect against processing in-flight CQEs for this QP.
1753 */
Dan Carpenterd19081e2012-05-02 09:14:47 +03001754 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05301755 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
Dan Carpenterd19081e2012-05-02 09:14:47 +03001756 spin_lock(&qp->rq_cq->cq_lock);
Parav Panditfe2caef2012-03-21 04:09:06 +05301757
1758 ocrdma_del_qpn_map(dev, qp);
1759
1760 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
Dan Carpenterd19081e2012-05-02 09:14:47 +03001761 spin_unlock(&qp->rq_cq->cq_lock);
1762 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05301763
1764 if (!pd->uctx) {
1765 ocrdma_discard_cqes(qp, qp->sq_cq);
1766 ocrdma_discard_cqes(qp, qp->rq_cq);
1767 }
1768 mutex_unlock(&dev->dev_lock);
1769
1770 if (pd->uctx) {
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301771 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1772 PAGE_ALIGN(qp->sq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301773 if (!qp->srq)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301774 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1775 PAGE_ALIGN(qp->rq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301776 }
1777
1778 ocrdma_del_flush_qp(qp);
1779
Parav Panditfe2caef2012-03-21 04:09:06 +05301780 kfree(qp->wqe_wr_id_tbl);
1781 kfree(qp->rqe_wr_id_tbl);
1782 kfree(qp);
1783 return status;
1784}
1785
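/* return the SRQ creation details (RQ ring and doorbell pages) to
 * user space and register the RQ buffer for mmap.
 */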
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301786static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1787 struct ib_udata *udata)
Parav Panditfe2caef2012-03-21 04:09:06 +05301788{
1789 int status;
1790 struct ocrdma_create_srq_uresp uresp;
1791
Dan Carpenter63ea3742013-07-29 22:34:29 +03001792 memset(&uresp, 0, sizeof(uresp));
Parav Panditfe2caef2012-03-21 04:09:06 +05301793 uresp.rq_dbid = srq->rq.dbid;
1794 uresp.num_rq_pages = 1;
Devesh Sharma1b76d382014-09-05 19:35:40 +05301795 uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
Parav Panditfe2caef2012-03-21 04:09:06 +05301796 uresp.rq_page_size = srq->rq.len;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301797 uresp.db_page_addr = dev->nic_info.unmapped_db +
1798 (srq->pd->id * dev->nic_info.db_page_size);
1799 uresp.db_page_size = dev->nic_info.db_page_size;
Parav Panditfe2caef2012-03-21 04:09:06 +05301800 uresp.num_rqe_allocated = srq->rq.max_cnt;
Devesh Sharma21c33912014-02-04 11:56:56 +05301801 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05301802 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
Parav Panditfe2caef2012-03-21 04:09:06 +05301803 uresp.db_shift = 24;
1804 } else {
1805 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1806 uresp.db_shift = 16;
1807 }
1808
1809 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1810 if (status)
1811 return status;
1812 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1813 uresp.rq_page_size);
1814 if (status)
1815 return status;
1816 return status;
1817}
1818
1819struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1820 struct ib_srq_init_attr *init_attr,
1821 struct ib_udata *udata)
1822{
1823 int status = -ENOMEM;
1824 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301825 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301826 struct ocrdma_srq *srq;
1827
1828 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1829 return ERR_PTR(-EINVAL);
1830 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1831 return ERR_PTR(-EINVAL);
1832
1833 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1834 if (!srq)
1835 return ERR_PTR(status);
1836
1837 spin_lock_init(&srq->q_lock);
Parav Panditfe2caef2012-03-21 04:09:06 +05301838 srq->pd = pd;
1839 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301840 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
Parav Panditfe2caef2012-03-21 04:09:06 +05301841 if (status)
1842 goto err;
1843
1844 if (udata == NULL) {
1845 srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1846 GFP_KERNEL);
1847 if (srq->rqe_wr_id_tbl == NULL)
1848 goto arm_err;
1849
1850 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1851 (srq->rq.max_cnt % 32 ? 1 : 0);
1852 srq->idx_bit_fields =
1853 kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
1854 if (srq->idx_bit_fields == NULL)
1855 goto arm_err;
1856 memset(srq->idx_bit_fields, 0xff,
1857 srq->bit_fields_len * sizeof(u32));
1858 }
1859
1860 if (init_attr->attr.srq_limit) {
1861 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1862 if (status)
1863 goto arm_err;
1864 }
1865
Parav Panditfe2caef2012-03-21 04:09:06 +05301866 if (udata) {
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301867 status = ocrdma_copy_srq_uresp(dev, srq, udata);
Parav Panditfe2caef2012-03-21 04:09:06 +05301868 if (status)
1869 goto arm_err;
1870 }
1871
Parav Panditfe2caef2012-03-21 04:09:06 +05301872 return &srq->ibsrq;
1873
1874arm_err:
1875 ocrdma_mbx_destroy_srq(dev, srq);
1876err:
1877 kfree(srq->rqe_wr_id_tbl);
1878 kfree(srq->idx_bit_fields);
1879 kfree(srq);
1880 return ERR_PTR(status);
1881}
1882
1883int ocrdma_modify_srq(struct ib_srq *ibsrq,
1884 struct ib_srq_attr *srq_attr,
1885 enum ib_srq_attr_mask srq_attr_mask,
1886 struct ib_udata *udata)
1887{
1888 int status = 0;
1889 struct ocrdma_srq *srq;
Parav Panditfe2caef2012-03-21 04:09:06 +05301890
1891 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301892 if (srq_attr_mask & IB_SRQ_MAX_WR)
1893 status = -EINVAL;
1894 else
1895 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1896 return status;
1897}
1898
1899int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1900{
1901 int status;
1902 struct ocrdma_srq *srq;
Parav Panditfe2caef2012-03-21 04:09:06 +05301903
1904 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301905 status = ocrdma_mbx_query_srq(srq, srq_attr);
1906 return status;
1907}
1908
1909int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1910{
1911 int status;
1912 struct ocrdma_srq *srq;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301913 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301914
1915 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301916
1917 status = ocrdma_mbx_destroy_srq(dev, srq);
1918
1919 if (srq->pd->uctx)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301920 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1921 PAGE_ALIGN(srq->rq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301922
Parav Panditfe2caef2012-03-21 04:09:06 +05301923 kfree(srq->idx_bit_fields);
1924 kfree(srq->rqe_wr_id_tbl);
1925 kfree(srq);
1926 return status;
1927}
1928
1929/* unprivileged verbs and their support functions. */
1930static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1931 struct ocrdma_hdr_wqe *hdr,
1932 struct ib_send_wr *wr)
1933{
1934 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1935 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1936 struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1937
1938 ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1939 if (qp->qp_type == IB_QPT_GSI)
1940 ud_hdr->qkey = qp->qkey;
1941 else
1942 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1943 ud_hdr->rsvd_ahid = ah->id;
1944}
1945
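/* copy the ib_sge list into the WQE's hardware SGEs and accumulate
 * the payload length into the WQE header.
 */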
1946static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1947 struct ocrdma_sge *sge, int num_sge,
1948 struct ib_sge *sg_list)
1949{
1950 int i;
1951
1952 for (i = 0; i < num_sge; i++) {
1953 sge[i].lrkey = sg_list[i].lkey;
1954 sge[i].addr_lo = sg_list[i].addr;
1955 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1956 sge[i].len = sg_list[i].length;
1957 hdr->total_len += sg_list[i].length;
1958 }
1959 if (num_sge == 0)
1960 memset(sge, 0, sizeof(*sge));
1961}
1962
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301963static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1964{
1965 uint32_t total_len = 0, i;
1966
1967 for (i = 0; i < num_sge; i++)
1968 total_len += sg_list[i].length;
1969 return total_len;
1970}
1971
1972
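/* place the payload inline in the WQE (IB_SEND_INLINE on non-UD QPs)
 * or build regular SGEs, and update the WQE size in the header.
 */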
Parav Panditfe2caef2012-03-21 04:09:06 +05301973static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1974 struct ocrdma_hdr_wqe *hdr,
1975 struct ocrdma_sge *sge,
1976 struct ib_send_wr *wr, u32 wqe_size)
1977{
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301978 int i;
1979 char *dpp_addr;
1980
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301981 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301982 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1983 if (unlikely(hdr->total_len > qp->max_inline_data)) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001984 pr_err("%s() supported_len=0x%x,\n"
Masanari Iida1a84db52014-08-29 23:37:33 +09001985 " unsupported len req=0x%x\n", __func__,
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301986 qp->max_inline_data, hdr->total_len);
Parav Panditfe2caef2012-03-21 04:09:06 +05301987 return -EINVAL;
1988 }
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301989 dpp_addr = (char *)sge;
1990 for (i = 0; i < wr->num_sge; i++) {
1991 memcpy(dpp_addr,
1992 (void *)(unsigned long)wr->sg_list[i].addr,
1993 wr->sg_list[i].length);
1994 dpp_addr += wr->sg_list[i].length;
1995 }
1996
Parav Panditfe2caef2012-03-21 04:09:06 +05301997 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301998 if (0 == hdr->total_len)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301999 wqe_size += sizeof(struct ocrdma_sge);
Parav Panditfe2caef2012-03-21 04:09:06 +05302000 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
2001 } else {
2002 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2003 if (wr->num_sge)
2004 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
2005 else
2006 wqe_size += sizeof(struct ocrdma_sge);
2007 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2008 }
2009 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2010 return 0;
2011}
2012
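/* build a SEND WQE; UD and GSI QPs carry an extra UD header before the SGEs. */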
2013static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2014 struct ib_send_wr *wr)
2015{
2016 int status;
2017 struct ocrdma_sge *sge;
2018 u32 wqe_size = sizeof(*hdr);
2019
2020 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2021 ocrdma_build_ud_hdr(qp, hdr, wr);
2022 sge = (struct ocrdma_sge *)(hdr + 2);
2023 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302024 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302025 sge = (struct ocrdma_sge *)(hdr + 1);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302026 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302027
2028 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2029 return status;
2030}
2031
2032static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2033 struct ib_send_wr *wr)
2034{
2035 int status;
2036 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2037 struct ocrdma_sge *sge = ext_rw + 1;
2038 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2039
2040 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2041 if (status)
2042 return status;
2043 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2044 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2045 ext_rw->lrkey = wr->wr.rdma.rkey;
2046 ext_rw->len = hdr->total_len;
2047 return 0;
2048}
2049
2050static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2051 struct ib_send_wr *wr)
2052{
2053 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2054 struct ocrdma_sge *sge = ext_rw + 1;
2055 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2056 sizeof(struct ocrdma_hdr_wqe);
2057
2058 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2059 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2060 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2061 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2062
2063 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2064 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2065 ext_rw->lrkey = wr->wr.rdma.rkey;
2066 ext_rw->len = hdr->total_len;
2067}
2068
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302069static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
2070 struct ocrdma_hw_mr *hwmr)
2071{
2072 int i;
2073 u64 buf_addr = 0;
2074 int num_pbes;
2075 struct ocrdma_pbe *pbe;
2076
2077 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2078 num_pbes = 0;
2079
2080 /* go through the OS phy regions & fill hw pbe entries into pbls. */
2081 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
2082		/* one OS buffer may need more than one pbe when
2083		 * buffers are of different sizes.
2084		 * split the ib_buf into one or more pbes.
2085		 */
2086 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
2087 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2088 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2089 num_pbes += 1;
2090 pbe++;
2091
2092 /* if the pbl is full storing the pbes,
2093 * move to next pbl.
2094 */
2095 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
2096 pbl_tbl++;
2097 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2098 }
2099 }
2100 return;
2101}
2102
2103static int get_encoded_page_size(int pg_sz)
2104{
2105	/* Max size is 256M (4096 << 16) */
2106 int i = 0;
2107 for (; i < 17; i++)
2108 if (pg_sz == (4096 << i))
2109 break;
2110 return i;
2111}
2112
2113
2114static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2115 struct ib_send_wr *wr)
2116{
2117 u64 fbo;
2118 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2119 struct ocrdma_mr *mr;
2120 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2121
2122 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2123
Naresh Gottumukkalad5e3f372013-10-28 17:29:34 +05302124 if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302125 return -EINVAL;
2126
2127 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2128 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2129
2130 if (wr->wr.fast_reg.page_list_len == 0)
2131 BUG();
2132 if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
2133 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2134 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
2135 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2136 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
2137 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2138 hdr->lkey = wr->wr.fast_reg.rkey;
2139 hdr->total_len = wr->wr.fast_reg.length;
2140
2141 fbo = wr->wr.fast_reg.iova_start -
2142 (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
2143
2144 fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
2145 fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
2146 fast_reg->fbo_hi = upper_32_bits(fbo);
2147 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2148 fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
2149 fast_reg->size_sge =
2150 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
Roland Dreier7a1e89d2014-03-17 23:14:17 -07002151 mr = (struct ocrdma_mr *) (unsigned long)
2152 qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302153 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2154 return 0;
2155}
2156
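/* ring the SQ doorbell to inform the adapter that a new WQE has been
 * posted on this QP's send queue.
 */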
Parav Panditfe2caef2012-03-21 04:09:06 +05302157static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2158{
Devesh Sharma2df84fa82014-02-04 11:56:55 +05302159 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05302160
2161 iowrite32(val, qp->sq_db);
2162}
2163
2164int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2165 struct ib_send_wr **bad_wr)
2166{
2167 int status = 0;
2168 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2169 struct ocrdma_hdr_wqe *hdr;
2170 unsigned long flags;
2171
2172 spin_lock_irqsave(&qp->q_lock, flags);
2173 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2174 spin_unlock_irqrestore(&qp->q_lock, flags);
Naresh Gottumukkalaf6ddcf72013-06-10 04:42:40 +00002175 *bad_wr = wr;
Parav Panditfe2caef2012-03-21 04:09:06 +05302176 return -EINVAL;
2177 }
2178
2179 while (wr) {
Mitesh Ahujaf252b5d2014-06-10 19:32:20 +05302180 if (qp->qp_type == IB_QPT_UD &&
2181 (wr->opcode != IB_WR_SEND &&
2182 wr->opcode != IB_WR_SEND_WITH_IMM)) {
2183 *bad_wr = wr;
2184 status = -EINVAL;
2185 break;
2186 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302187 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2188 wr->num_sge > qp->sq.max_sges) {
Naresh Gottumukkalaf6ddcf72013-06-10 04:42:40 +00002189 *bad_wr = wr;
Parav Panditfe2caef2012-03-21 04:09:06 +05302190 status = -ENOMEM;
2191 break;
2192 }
2193 hdr = ocrdma_hwq_head(&qp->sq);
2194 hdr->cw = 0;
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05302195 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
Parav Panditfe2caef2012-03-21 04:09:06 +05302196 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2197 if (wr->send_flags & IB_SEND_FENCE)
2198 hdr->cw |=
2199 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2200 if (wr->send_flags & IB_SEND_SOLICITED)
2201 hdr->cw |=
2202 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2203 hdr->total_len = 0;
2204 switch (wr->opcode) {
2205 case IB_WR_SEND_WITH_IMM:
2206 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2207 hdr->immdt = ntohl(wr->ex.imm_data);
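			/* fall through */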
2208 case IB_WR_SEND:
2209 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2210			status = ocrdma_build_send(qp, hdr, wr);
2211 break;
2212 case IB_WR_SEND_WITH_INV:
2213 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2214 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2215 hdr->lkey = wr->ex.invalidate_rkey;
2216 status = ocrdma_build_send(qp, hdr, wr);
2217 break;
2218 case IB_WR_RDMA_WRITE_WITH_IMM:
2219 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2220 hdr->immdt = ntohl(wr->ex.imm_data);
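			/* fall through */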
2221 case IB_WR_RDMA_WRITE:
2222 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2223 status = ocrdma_build_write(qp, hdr, wr);
2224 break;
2225 case IB_WR_RDMA_READ_WITH_INV:
2226 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
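			/* fall through */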
2227 case IB_WR_RDMA_READ:
2228 ocrdma_build_read(qp, hdr, wr);
2229 break;
2230 case IB_WR_LOCAL_INV:
2231 hdr->cw |=
2232 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302233 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2234 sizeof(struct ocrdma_sge)) /
Parav Panditfe2caef2012-03-21 04:09:06 +05302235 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2236 hdr->lkey = wr->ex.invalidate_rkey;
2237 break;
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302238 case IB_WR_FAST_REG_MR:
2239 status = ocrdma_build_fr(qp, hdr, wr);
2240 break;
Parav Panditfe2caef2012-03-21 04:09:06 +05302241 default:
2242 status = -EINVAL;
2243 break;
2244 }
2245 if (status) {
2246 *bad_wr = wr;
2247 break;
2248 }
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05302249 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
Parav Panditfe2caef2012-03-21 04:09:06 +05302250 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2251 else
2252 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2253 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2254 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2255 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2256 /* make sure wqe is written before adapter can access it */
2257 wmb();
2258 /* inform hw to start processing it */
2259 ocrdma_ring_sq_db(qp);
2260
2261 /* update pointer, counter for next wr */
2262 ocrdma_hwq_inc_head(&qp->sq);
2263 wr = wr->next;
2264 }
2265 spin_unlock_irqrestore(&qp->q_lock, flags);
2266 return status;
2267}
2268
2269static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2270{
Devesh Sharma2df84fa82014-02-04 11:56:55 +05302271 u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05302272
Devesh Sharma2df84fa82014-02-04 11:56:55 +05302273 iowrite32(val, qp->rq_db);
Parav Panditfe2caef2012-03-21 04:09:06 +05302274}
2275
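/* build a receive WQE from wr->sg_list; 'tag' is the SRQ shadow-table
 * index for SRQs (0 for ordinary RQs).
 */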
2276static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2277 u16 tag)
2278{
2279 u32 wqe_size = 0;
2280 struct ocrdma_sge *sge;
2281 if (wr->num_sge)
2282 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2283 else
2284 wqe_size = sizeof(*sge) + sizeof(*rqe);
2285
2286 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2287 OCRDMA_WQE_SIZE_SHIFT);
2288 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2289 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2290 rqe->total_len = 0;
2291 rqe->rsvd_tag = tag;
2292 sge = (struct ocrdma_sge *)(rqe + 1);
2293 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2294 ocrdma_cpu_to_le32(rqe, wqe_size);
2295}
2296
2297int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2298 struct ib_recv_wr **bad_wr)
2299{
2300 int status = 0;
2301 unsigned long flags;
2302 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2303 struct ocrdma_hdr_wqe *rqe;
2304
2305 spin_lock_irqsave(&qp->q_lock, flags);
2306 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2307 spin_unlock_irqrestore(&qp->q_lock, flags);
2308 *bad_wr = wr;
2309 return -EINVAL;
2310 }
2311 while (wr) {
2312 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2313 wr->num_sge > qp->rq.max_sges) {
2314 *bad_wr = wr;
2315 status = -ENOMEM;
2316 break;
2317 }
2318 rqe = ocrdma_hwq_head(&qp->rq);
2319 ocrdma_build_rqe(rqe, wr, 0);
2320
2321 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2322 /* make sure rqe is written before adapter can access it */
2323 wmb();
2324
2325 /* inform hw to start processing it */
2326 ocrdma_ring_rq_db(qp);
2327
2328 /* update pointer, counter for next wr */
2329 ocrdma_hwq_inc_head(&qp->rq);
2330 wr = wr->next;
2331 }
2332 spin_unlock_irqrestore(&qp->q_lock, flags);
2333 return status;
2334}
2335
2336/* cqes for an srq's rqes can potentially arrive out of order.
2337 * the index gives the entry in the shadow table where the wr_id
2338 * is stored. the tag/index is returned in the cqe to refer back
2339 * to a given rqe.
2340 */
2341static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2342{
2343 int row = 0;
2344 int indx = 0;
2345
2346 for (row = 0; row < srq->bit_fields_len; row++) {
2347 if (srq->idx_bit_fields[row]) {
2348 indx = ffs(srq->idx_bit_fields[row]);
2349 indx = (row * 32) + (indx - 1);
2350 if (indx >= srq->rq.max_cnt)
2351 BUG();
2352 ocrdma_srq_toggle_bit(srq, indx);
2353 break;
2354 }
2355 }
2356
2357 if (row == srq->bit_fields_len)
2358 BUG();
Selvin Xaviercf5788a2014-02-04 11:57:03 +05302359 return indx + 1; /* Use from index 1 */
Parav Panditfe2caef2012-03-21 04:09:06 +05302360}
2361
2362static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2363{
2364 u32 val = srq->rq.dbid | (1 << 16);
2365
2366 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2367}
2368
2369int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2370 struct ib_recv_wr **bad_wr)
2371{
2372 int status = 0;
2373 unsigned long flags;
2374 struct ocrdma_srq *srq;
2375 struct ocrdma_hdr_wqe *rqe;
2376 u16 tag;
2377
2378 srq = get_ocrdma_srq(ibsrq);
2379
2380 spin_lock_irqsave(&srq->q_lock, flags);
2381 while (wr) {
2382 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2383 wr->num_sge > srq->rq.max_sges) {
2384 status = -ENOMEM;
2385 *bad_wr = wr;
2386 break;
2387 }
2388 tag = ocrdma_srq_get_idx(srq);
2389 rqe = ocrdma_hwq_head(&srq->rq);
2390 ocrdma_build_rqe(rqe, wr, tag);
2391
2392 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2393 /* make sure rqe is written before adapter can perform DMA */
2394 wmb();
2395 /* inform hw to start processing it */
2396 ocrdma_ring_srq_db(srq);
2397 /* update pointer, counter for next wr */
2398 ocrdma_hwq_inc_head(&srq->rq);
2399 wr = wr->next;
2400 }
2401 spin_unlock_irqrestore(&srq->q_lock, flags);
2402 return status;
2403}
2404
2405static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2406{
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302407 enum ib_wc_status ibwc_status;
Parav Panditfe2caef2012-03-21 04:09:06 +05302408
2409 switch (status) {
2410 case OCRDMA_CQE_GENERAL_ERR:
2411 ibwc_status = IB_WC_GENERAL_ERR;
2412 break;
2413 case OCRDMA_CQE_LOC_LEN_ERR:
2414 ibwc_status = IB_WC_LOC_LEN_ERR;
2415 break;
2416 case OCRDMA_CQE_LOC_QP_OP_ERR:
2417 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2418 break;
2419 case OCRDMA_CQE_LOC_EEC_OP_ERR:
2420 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2421 break;
2422 case OCRDMA_CQE_LOC_PROT_ERR:
2423 ibwc_status = IB_WC_LOC_PROT_ERR;
2424 break;
2425 case OCRDMA_CQE_WR_FLUSH_ERR:
2426 ibwc_status = IB_WC_WR_FLUSH_ERR;
2427 break;
2428 case OCRDMA_CQE_MW_BIND_ERR:
2429 ibwc_status = IB_WC_MW_BIND_ERR;
2430 break;
2431 case OCRDMA_CQE_BAD_RESP_ERR:
2432 ibwc_status = IB_WC_BAD_RESP_ERR;
2433 break;
2434 case OCRDMA_CQE_LOC_ACCESS_ERR:
2435 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2436 break;
2437 case OCRDMA_CQE_REM_INV_REQ_ERR:
2438 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2439 break;
2440 case OCRDMA_CQE_REM_ACCESS_ERR:
2441 ibwc_status = IB_WC_REM_ACCESS_ERR;
2442 break;
2443 case OCRDMA_CQE_REM_OP_ERR:
2444 ibwc_status = IB_WC_REM_OP_ERR;
2445 break;
2446 case OCRDMA_CQE_RETRY_EXC_ERR:
2447 ibwc_status = IB_WC_RETRY_EXC_ERR;
2448 break;
2449 case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2450 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2451 break;
2452 case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2453 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2454 break;
2455 case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2456 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2457 break;
2458 case OCRDMA_CQE_REM_ABORT_ERR:
2459 ibwc_status = IB_WC_REM_ABORT_ERR;
2460 break;
2461 case OCRDMA_CQE_INV_EECN_ERR:
2462 ibwc_status = IB_WC_INV_EECN_ERR;
2463 break;
2464 case OCRDMA_CQE_INV_EEC_STATE_ERR:
2465 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2466 break;
2467 case OCRDMA_CQE_FATAL_ERR:
2468 ibwc_status = IB_WC_FATAL_ERR;
2469 break;
2470 case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2471 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2472 break;
2473 default:
2474 ibwc_status = IB_WC_GENERAL_ERR;
2475 break;
Joe Perches2b50176d2013-10-08 16:07:22 -07002476 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302477 return ibwc_status;
2478}
2479
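/* build the work completion for the SQ WQE at 'wqe_idx', deriving the
 * opcode (and the byte count for RDMA reads) from the stored WQE header.
 */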
2480static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2481 u32 wqe_idx)
2482{
2483 struct ocrdma_hdr_wqe *hdr;
2484 struct ocrdma_sge *rw;
2485 int opcode;
2486
2487 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2488
2489 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2490 /* Undo the hdr->cw swap */
2491 opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2492 switch (opcode) {
2493 case OCRDMA_WRITE:
2494 ibwc->opcode = IB_WC_RDMA_WRITE;
2495 break;
2496 case OCRDMA_READ:
2497 rw = (struct ocrdma_sge *)(hdr + 1);
2498 ibwc->opcode = IB_WC_RDMA_READ;
2499 ibwc->byte_len = rw->len;
2500 break;
2501 case OCRDMA_SEND:
2502 ibwc->opcode = IB_WC_SEND;
2503 break;
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302504 case OCRDMA_FR_MR:
2505 ibwc->opcode = IB_WC_FAST_REG_MR;
2506 break;
Parav Panditfe2caef2012-03-21 04:09:06 +05302507 case OCRDMA_LKEY_INV:
2508 ibwc->opcode = IB_WC_LOCAL_INV;
2509 break;
2510 default:
2511 ibwc->status = IB_WC_GENERAL_ERR;
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002512 pr_err("%s() invalid opcode received = 0x%x\n",
2513 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
Parav Panditfe2caef2012-03-21 04:09:06 +05302514 break;
Joe Perches2b50176d2013-10-08 16:07:22 -07002515 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302516}
2517
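/* overwrite the status field of the CQE with WR_FLUSH_ERR so that the
 * corresponding work request completes as flushed.
 */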
2518static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2519 struct ocrdma_cqe *cqe)
2520{
2521 if (is_cqe_for_sq(cqe)) {
2522 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2523 cqe->flags_status_srcqpn) &
2524 ~OCRDMA_CQE_STATUS_MASK);
2525 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2526 cqe->flags_status_srcqpn) |
2527 (OCRDMA_CQE_WR_FLUSH_ERR <<
2528 OCRDMA_CQE_STATUS_SHIFT));
2529 } else {
2530 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2531 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2532 cqe->flags_status_srcqpn) &
2533 ~OCRDMA_CQE_UD_STATUS_MASK);
2534 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2535 cqe->flags_status_srcqpn) |
2536 (OCRDMA_CQE_WR_FLUSH_ERR <<
2537 OCRDMA_CQE_UD_STATUS_SHIFT));
2538 } else {
2539 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2540 cqe->flags_status_srcqpn) &
2541 ~OCRDMA_CQE_STATUS_MASK);
2542 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2543 cqe->flags_status_srcqpn) |
2544 (OCRDMA_CQE_WR_FLUSH_ERR <<
2545 OCRDMA_CQE_STATUS_SHIFT));
2546 }
2547 }
2548}
2549
2550static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2551 struct ocrdma_qp *qp, int status)
2552{
2553 bool expand = false;
2554
2555 ibwc->byte_len = 0;
2556 ibwc->qp = &qp->ibqp;
2557 ibwc->status = ocrdma_to_ibwc_err(status);
2558
2559 ocrdma_flush_qp(qp);
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302560 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
Parav Panditfe2caef2012-03-21 04:09:06 +05302561
2562	/* if a wqe/rqe is pending for which a cqe needs to be returned,
2563	 * trigger expanding it into a flush cqe.
2564	 */
2565 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2566 expand = true;
2567 ocrdma_set_cqe_status_flushed(qp, cqe);
2568 }
2569 return expand;
2570}
2571
2572static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2573 struct ocrdma_qp *qp, int status)
2574{
2575 ibwc->opcode = IB_WC_RECV;
2576 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2577 ocrdma_hwq_inc_tail(&qp->rq);
2578
2579 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2580}
2581
2582static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2583 struct ocrdma_qp *qp, int status)
2584{
2585 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2586 ocrdma_hwq_inc_tail(&qp->sq);
2587
2588 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2589}
2590
2591
2592static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2593 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2594 bool *polled, bool *stop)
2595{
2596 bool expand;
Selvin Xavierad56ebb2014-12-18 14:12:59 +05302597 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302598 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2599 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
Selvin Xavierad56ebb2014-12-18 14:12:59 +05302600 if (status < OCRDMA_MAX_CQE_ERR)
2601 atomic_inc(&dev->cqe_err_stats[status]);
Parav Panditfe2caef2012-03-21 04:09:06 +05302602
2603	/* when the hw sq is empty but the rq is not, we continue
2604	 * to keep the cqe in order to get the cq event again.
2605	 */
2606 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2607 /* when cq for rq and sq is same, it is safe to return
2608 * flush cqe for RQEs.
2609 */
2610 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2611 *polled = true;
2612 status = OCRDMA_CQE_WR_FLUSH_ERR;
2613 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2614 } else {
2615 /* stop processing further cqe as this cqe is used for
2616 * triggering cq event on buddy cq of RQ.
2617 * When QP is destroyed, this cqe will be removed
2618 * from the cq's hardware q.
2619 */
2620 *polled = false;
2621 *stop = true;
2622 expand = false;
2623 }
Selvin Xaviera96ffb12014-06-10 19:32:19 +05302624 } else if (is_hw_sq_empty(qp)) {
2625 /* Do nothing */
2626 expand = false;
2627 *polled = false;
2628 *stop = false;
Parav Panditfe2caef2012-03-21 04:09:06 +05302629 } else {
2630 *polled = true;
2631 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2632 }
2633 return expand;
2634}
2635
2636static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2637 struct ocrdma_cqe *cqe,
2638 struct ib_wc *ibwc, bool *polled)
2639{
2640 bool expand = false;
2641 int tail = qp->sq.tail;
2642 u32 wqe_idx;
2643
2644 if (!qp->wqe_wr_id_tbl[tail].signaled) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302645 *polled = false; /* WC cannot be consumed yet */
2646 } else {
2647 ibwc->status = IB_WC_SUCCESS;
2648 ibwc->wc_flags = 0;
2649 ibwc->qp = &qp->ibqp;
2650 ocrdma_update_wc(qp, ibwc, tail);
2651 *polled = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302652 }
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302653 wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2654 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
Parav Panditae3bca92012-08-17 14:45:33 +00002655 if (tail != wqe_idx)
2656 expand = true; /* Coalesced CQE can't be consumed yet */
2657
Parav Panditfe2caef2012-03-21 04:09:06 +05302658 ocrdma_hwq_inc_tail(&qp->sq);
2659 return expand;
2660}
2661
2662static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2663 struct ib_wc *ibwc, bool *polled, bool *stop)
2664{
2665 int status;
2666 bool expand;
2667
2668 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2669 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2670
2671 if (status == OCRDMA_CQE_SUCCESS)
2672 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2673 else
2674 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2675 return expand;
2676}
2677
2678static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2679{
2680 int status;
2681
2682 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2683 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2684 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2685 OCRDMA_CQE_SRCQP_MASK;
2686 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2687 OCRDMA_CQE_PKEY_MASK;
2688 ibwc->wc_flags = IB_WC_GRH;
2689 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2690 OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2691 return status;
2692}
2693
2694static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2695 struct ocrdma_cqe *cqe,
2696 struct ocrdma_qp *qp)
2697{
2698 unsigned long flags;
2699 struct ocrdma_srq *srq;
2700 u32 wqe_idx;
2701
2702 srq = get_ocrdma_srq(qp->ibqp.srq);
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302703 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
Selvin Xaviercf5788a2014-02-04 11:57:03 +05302704 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2705 if (wqe_idx < 1)
2706 BUG();
2707
Parav Panditfe2caef2012-03-21 04:09:06 +05302708 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2709 spin_lock_irqsave(&srq->q_lock, flags);
Selvin Xaviercf5788a2014-02-04 11:57:03 +05302710 ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05302711 spin_unlock_irqrestore(&srq->q_lock, flags);
2712 ocrdma_hwq_inc_tail(&srq->rq);
2713}
2714
2715static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2716 struct ib_wc *ibwc, bool *polled, bool *stop,
2717 int status)
2718{
2719 bool expand;
Selvin Xavierad56ebb2014-12-18 14:12:59 +05302720 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2721
2722 if (status < OCRDMA_MAX_CQE_ERR)
2723 atomic_inc(&dev->cqe_err_stats[status]);
Parav Panditfe2caef2012-03-21 04:09:06 +05302724
2725	/* when the hw_rq is empty but the wq is not, continue
2726	 * to keep the cqe to get the cq event again.
2727	 */
2728 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2729 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2730 *polled = true;
2731 status = OCRDMA_CQE_WR_FLUSH_ERR;
2732 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2733 } else {
2734 *polled = false;
2735 *stop = true;
2736 expand = false;
2737 }
Selvin Xaviera96ffb12014-06-10 19:32:19 +05302738 } else if (is_hw_rq_empty(qp)) {
2739 /* Do nothing */
2740 expand = false;
2741 *polled = false;
2742 *stop = false;
Parav Pandita3698a92012-06-11 16:39:20 +05302743 } else {
2744 *polled = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302745 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
Parav Pandita3698a92012-06-11 16:39:20 +05302746 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302747 return expand;
2748}
2749
2750static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2751 struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2752{
2753 ibwc->opcode = IB_WC_RECV;
2754 ibwc->qp = &qp->ibqp;
2755 ibwc->status = IB_WC_SUCCESS;
2756
2757 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2758 ocrdma_update_ud_rcqe(ibwc, cqe);
2759 else
2760 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2761
2762 if (is_cqe_imm(cqe)) {
2763 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2764 ibwc->wc_flags |= IB_WC_WITH_IMM;
2765 } else if (is_cqe_wr_imm(cqe)) {
2766 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2767 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2768 ibwc->wc_flags |= IB_WC_WITH_IMM;
2769 } else if (is_cqe_invalidated(cqe)) {
2770 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2771 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2772 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302773 if (qp->ibqp.srq) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302774 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302775 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302776 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2777 ocrdma_hwq_inc_tail(&qp->rq);
2778 }
2779}
2780
2781static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2782 struct ib_wc *ibwc, bool *polled, bool *stop)
2783{
2784 int status;
2785 bool expand = false;
2786
2787 ibwc->wc_flags = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302788 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302789 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2790 OCRDMA_CQE_UD_STATUS_MASK) >>
2791 OCRDMA_CQE_UD_STATUS_SHIFT;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302792 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302793 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2794 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302795 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302796
2797 if (status == OCRDMA_CQE_SUCCESS) {
2798 *polled = true;
2799 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2800 } else {
2801 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2802 status);
2803 }
2804 return expand;
2805}
2806
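/* track CQE ownership: when phase_change is used, flip the expected
 * phase bit on wrap-around; otherwise clear the consumed CQE's valid bit.
 */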
2807static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2808 u16 cur_getp)
2809{
2810 if (cq->phase_change) {
2811 if (cur_getp == 0)
2812 cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302813 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302814 /* clear valid bit */
2815 cqe->flags_status_srcqpn = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302816 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302817}
2818
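/* poll up to num_entries completions from the hardware CQ, dispatching
 * each CQE to the SQ or RQ handler, then update the CQ doorbell for the
 * consumed entries.
 */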
2819static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2820 struct ib_wc *ibwc)
2821{
2822 u16 qpn = 0;
2823 int i = 0;
2824 bool expand = false;
2825 int polled_hw_cqes = 0;
2826 struct ocrdma_qp *qp = NULL;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302827 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302828 struct ocrdma_cqe *cqe;
2829 u16 cur_getp; bool polled = false; bool stop = false;
2830
2831 cur_getp = cq->getp;
2832 while (num_entries) {
2833 cqe = cq->va + cur_getp;
2834 /* check whether valid cqe or not */
2835 if (!is_cqe_valid(cq, cqe))
2836 break;
2837 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2838 /* ignore discarded cqe */
2839 if (qpn == 0)
2840 goto skip_cqe;
2841 qp = dev->qp_tbl[qpn];
2842 BUG_ON(qp == NULL);
2843
2844 if (is_cqe_for_sq(cqe)) {
2845 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2846 &stop);
2847 } else {
2848 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2849 &stop);
2850 }
2851 if (expand)
2852 goto expand_cqe;
2853 if (stop)
2854 goto stop_cqe;
2855 /* clear qpn to avoid duplicate processing by discard_cqe() */
2856 cqe->cmn.qpn = 0;
2857skip_cqe:
2858 polled_hw_cqes += 1;
2859 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2860 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2861expand_cqe:
2862 if (polled) {
2863 num_entries -= 1;
2864 i += 1;
2865 ibwc = ibwc + 1;
2866 polled = false;
2867 }
2868 }
2869stop_cqe:
2870 cq->getp = cur_getp;
Devesh Sharmaea6176262014-02-04 11:56:54 +05302871 if (cq->deferred_arm) {
2872 ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
Parav Panditfe2caef2012-03-21 04:09:06 +05302873 polled_hw_cqes);
Devesh Sharmaea6176262014-02-04 11:56:54 +05302874 cq->deferred_arm = false;
2875 cq->deferred_sol = false;
2876 } else {
2877 /* We need to pop the CQE. No need to arm */
2878 ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
2879 polled_hw_cqes);
2880 cq->deferred_sol = false;
Parav Panditfe2caef2012-03-21 04:09:06 +05302881 }
Devesh Sharmaea6176262014-02-04 11:56:54 +05302882
Parav Panditfe2caef2012-03-21 04:09:06 +05302883 return i;
2884}
2885
2886/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2887static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2888 struct ocrdma_qp *qp, struct ib_wc *ibwc)
2889{
2890 int err_cqes = 0;
2891
2892 while (num_entries) {
2893 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2894 break;
2895 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2896 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2897 ocrdma_hwq_inc_tail(&qp->sq);
2898 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2899 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2900 ocrdma_hwq_inc_tail(&qp->rq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302901 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302902 return err_cqes;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302903 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302904 ibwc->byte_len = 0;
2905 ibwc->status = IB_WC_WR_FLUSH_ERR;
2906 ibwc = ibwc + 1;
2907 err_cqes += 1;
2908 num_entries -= 1;
2909 }
2910 return err_cqes;
2911}
2912
2913int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2914{
2915 int cqes_to_poll = num_entries;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302916 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2917 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302918 int num_os_cqe = 0, err_cqes = 0;
2919 struct ocrdma_qp *qp;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302920 unsigned long flags;
Parav Panditfe2caef2012-03-21 04:09:06 +05302921
2922 /* poll cqes from adapter CQ */
2923 spin_lock_irqsave(&cq->cq_lock, flags);
2924 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2925 spin_unlock_irqrestore(&cq->cq_lock, flags);
2926 cqes_to_poll -= num_os_cqe;
2927
2928 if (cqes_to_poll) {
2929 wc = wc + num_os_cqe;
2930		/* the adapter returns a single error cqe when the qp moves to
2931		 * the error state. So insert error cqes with wc_status
2932		 * FLUSHED for the pending WQEs and RQEs of every QP whose
2933		 * SQ or RQ uses this CQ.
2934 */
2935 spin_lock_irqsave(&dev->flush_q_lock, flags);
2936 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2937 if (cqes_to_poll == 0)
2938 break;
2939 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2940 cqes_to_poll -= err_cqes;
2941 num_os_cqe += err_cqes;
2942 wc = wc + err_cqes;
2943 }
2944 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2945 }
2946 return num_os_cqe;
2947}
2948
2949int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2950{
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302951 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2952 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302953 u16 cq_id;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05302954 unsigned long flags;
Devesh Sharmaea6176262014-02-04 11:56:54 +05302955 bool arm_needed = false, sol_needed = false;
Parav Panditfe2caef2012-03-21 04:09:06 +05302956
Parav Panditfe2caef2012-03-21 04:09:06 +05302957 cq_id = cq->id;
Parav Panditfe2caef2012-03-21 04:09:06 +05302958
2959 spin_lock_irqsave(&cq->cq_lock, flags);
2960 if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
Devesh Sharmaea6176262014-02-04 11:56:54 +05302961 arm_needed = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302962 if (cq_flags & IB_CQ_SOLICITED)
Devesh Sharmaea6176262014-02-04 11:56:54 +05302963 sol_needed = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302964
Devesh Sharmaea6176262014-02-04 11:56:54 +05302965 if (cq->first_arm) {
2966 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
2967 cq->first_arm = false;
Parav Panditfe2caef2012-03-21 04:09:06 +05302968 }
Devesh Sharmaea6176262014-02-04 11:56:54 +05302969
Devesh Sharmaf93439e2014-06-09 10:52:38 +05302970 cq->deferred_arm = true;
Devesh Sharmaea6176262014-02-04 11:56:54 +05302971 cq->deferred_sol = sol_needed;
Parav Panditfe2caef2012-03-21 04:09:06 +05302972 spin_unlock_irqrestore(&cq->cq_lock, flags);
Devesh Sharmaea6176262014-02-04 11:56:54 +05302973
Parav Panditfe2caef2012-03-21 04:09:06 +05302974 return 0;
2975}
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302976
2977struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
2978{
2979 int status;
2980 struct ocrdma_mr *mr;
2981 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2982 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2983
2984 if (max_page_list_len > dev->attr.max_pages_per_frmr)
2985 return ERR_PTR(-EINVAL);
2986
2987 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2988 if (!mr)
2989 return ERR_PTR(-ENOMEM);
2990
2991 status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
2992 if (status)
2993 goto pbl_err;
2994 mr->hwmr.fr_mr = 1;
2995 mr->hwmr.remote_rd = 0;
2996 mr->hwmr.remote_wr = 0;
2997 mr->hwmr.local_rd = 0;
2998 mr->hwmr.local_wr = 0;
2999 mr->hwmr.mw_bind = 0;
3000 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
3001 if (status)
3002 goto pbl_err;
3003 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
3004 if (status)
3005 goto mbx_err;
3006 mr->ibmr.rkey = mr->hwmr.lkey;
3007 mr->ibmr.lkey = mr->hwmr.lkey;
Roland Dreier7a1e89d2014-03-17 23:14:17 -07003008 dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
3009 (unsigned long) mr;
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05303010 return &mr->ibmr;
3011mbx_err:
3012 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
3013pbl_err:
3014 kfree(mr);
3015 return ERR_PTR(-ENOMEM);
3016}
3017
3018struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
3019 *ibdev,
3020 int page_list_len)
3021{
3022 struct ib_fast_reg_page_list *frmr_list;
3023 int size;
3024
3025 size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
3026 frmr_list = kzalloc(size, GFP_KERNEL);
3027 if (!frmr_list)
3028 return ERR_PTR(-ENOMEM);
3029 frmr_list->page_list = (u64 *)(frmr_list + 1);
3030 return frmr_list;
3031}
3032
3033void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
3034{
3035 kfree(page_list);
3036}
Naresh Gottumukkalacffce992013-08-26 15:27:44 +05303037
3038#define MAX_KERNEL_PBE_SIZE 65536
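/* pick a common PBE size usable for the whole physical buffer list and
 * return the number of PBEs required; returns 0 if the buffers cannot
 * be mapped (intermediate buffers not page aligned).
 */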
3039static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
3040 int buf_cnt, u32 *pbe_size)
3041{
3042 u64 total_size = 0;
3043 u64 buf_size = 0;
3044 int i;
3045 *pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
3046 *pbe_size = roundup_pow_of_two(*pbe_size);
3047
3048 /* find the smallest PBE size that we can have */
3049 for (i = 0; i < buf_cnt; i++) {
3050		/* first addr may not be page aligned, so skip the check for it */
3051 if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
3052 (buf_list[i].size & ~PAGE_MASK))) {
3053 return 0;
3054 }
3055
3056		/* if the configured PBE size is greater than the chosen one,
3057 * reduce the PBE size.
3058 */
3059 buf_size = roundup(buf_list[i].size, PAGE_SIZE);
3060		/* pbe_size has to be a power-of-two multiple of 4K: 1, 2, 4, 8, ... */
3061 buf_size = roundup_pow_of_two(buf_size);
3062 if (*pbe_size > buf_size)
3063 *pbe_size = buf_size;
3064
3065 total_size += buf_size;
3066 }
3067 *pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
3068 (MAX_KERNEL_PBE_SIZE) : (*pbe_size);
3069
3070 /* num_pbes = total_size / (*pbe_size); this is implemented below. */
3071
3072 return total_size >> ilog2(*pbe_size);
3073}
3074
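/* fill the PBL tables with PBEs for each physical buffer, splitting
 * buffers larger than pbe_size across multiple PBEs.
 */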
3075static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
3076 u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
3077 struct ocrdma_hw_mr *hwmr)
3078{
3079 int i;
3080 int idx;
3081 int pbes_per_buf = 0;
3082 u64 buf_addr = 0;
3083 int num_pbes;
3084 struct ocrdma_pbe *pbe;
3085 int total_num_pbes = 0;
3086
3087 if (!hwmr->num_pbes)
3088 return;
3089
3090 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
3091 num_pbes = 0;
3092
3093 /* go through the OS phy regions & fill hw pbe entries into pbls. */
3094 for (i = 0; i < ib_buf_cnt; i++) {
3095 buf_addr = buf_list[i].addr;
3096 pbes_per_buf =
3097 roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
3098 pbe_size;
3099 hwmr->len += buf_list[i].size;
3100		/* one OS buffer may need more than one pbe when
3101		 * buffers are of different sizes.
3102		 * split the ib_buf into one or more pbes.
3103		 */
3104 for (idx = 0; idx < pbes_per_buf; idx++) {
3105			/* we always program page-aligned addresses;
3106			 * any offset of the first address is taken care of by the fbo.
3107			 */
3108 if (i == 0) {
3109 /* for non zero fbo, assign the
3110 * start of the page.
3111 */
3112 pbe->pa_lo =
3113 cpu_to_le32((u32) (buf_addr & PAGE_MASK));
3114 pbe->pa_hi =
3115 cpu_to_le32((u32) upper_32_bits(buf_addr));
3116 } else {
3117 pbe->pa_lo =
3118 cpu_to_le32((u32) (buf_addr & 0xffffffff));
3119 pbe->pa_hi =
3120 cpu_to_le32((u32) upper_32_bits(buf_addr));
3121 }
3122 buf_addr += pbe_size;
3123 num_pbes += 1;
3124 total_num_pbes += 1;
3125 pbe++;
3126
3127 if (total_num_pbes == hwmr->num_pbes)
3128 goto mr_tbl_done;
3129 /* if the pbl is full storing the pbes,
3130 * move to next pbl.
3131 */
3132 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
3133 pbl_tbl++;
3134 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
3135 num_pbes = 0;
3136 }
3137 }
3138 }
3139mr_tbl_done:
3140 return;
3141}
3142
3143struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
3144 struct ib_phys_buf *buf_list,
3145 int buf_cnt, int acc, u64 *iova_start)
3146{
3147 int status = -ENOMEM;
3148 struct ocrdma_mr *mr;
3149 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
3150 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
3151 u32 num_pbes;
3152 u32 pbe_size = 0;
3153
3154 if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
3155 return ERR_PTR(-EINVAL);
3156
3157 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3158 if (!mr)
3159 return ERR_PTR(status);
3160
3161 num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
3162 if (num_pbes == 0) {
3163 status = -EINVAL;
3164 goto pbl_err;
3165 }
3166 status = ocrdma_get_pbl_info(dev, mr, num_pbes);
3167 if (status)
3168 goto pbl_err;
3169
3170 mr->hwmr.pbe_size = pbe_size;
3171 mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
3172 mr->hwmr.va = *iova_start;
3173 mr->hwmr.local_rd = 1;
3174 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3175 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3176 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3177 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3178 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
3179
3180 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
3181 if (status)
3182 goto pbl_err;
3183 build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
3184 &mr->hwmr);
3185 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
3186 if (status)
3187 goto mbx_err;
3188
3189 mr->ibmr.lkey = mr->hwmr.lkey;
3190 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
3191 mr->ibmr.rkey = mr->hwmr.lkey;
3192 return &mr->ibmr;
3193
3194mbx_err:
3195 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
3196pbl_err:
3197 kfree(mr);
3198 return ERR_PTR(status);
3199}