1/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39#include <linux/interrupt.h>
40#include <linux/types.h>
41#include <linux/pci.h>
42#include <linux/netdevice.h>
43#include <linux/if_ether.h>
44
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_user_verbs.h>
47#include <rdma/ib_umem.h>
48#include <rdma/ib_addr.h>
49#include <rdma/ib_mad.h>
50#include <rdma/ib_cache.h>
51
52#include "bnxt_ulp.h"
53
54#include "roce_hsi.h"
55#include "qplib_res.h"
56#include "qplib_sp.h"
57#include "qplib_fp.h"
58#include "qplib_rcfw.h"
59
60#include "bnxt_re.h"
61#include "ib_verbs.h"
62#include <rdma/bnxt_re-abi.h>
63
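/* Translate between the IB verbs access flags and the bnxt_qplib/HW access bits. */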
64static int __from_ib_access_flags(int iflags)
65{
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
83};
84
85static enum ib_access_flags __to_ib_access_flags(int qflags)
86{
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
104};
105
106static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 struct bnxt_qplib_sge *sg_list, int num)
108{
109 int i, total = 0;
110
111 for (i = 0; i < num; i++) {
112 sg_list[i].addr = ib_sg_list[i].addr;
113 sg_list[i].lkey = ib_sg_list[i].lkey;
114 sg_list[i].size = ib_sg_list[i].length;
115 total += sg_list[i].size;
116 }
117 return total;
118}
119
120/* Device */
121struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122{
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
125
126 rcu_read_lock();
127 if (rdev)
128 netdev = rdev->netdev;
129 if (netdev)
130 dev_hold(netdev);
131
132 rcu_read_unlock();
133 return netdev;
134}
135
136int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
139{
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142
143 memset(ib_attr, 0, sizeof(*ib_attr));
144
145 ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
146 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
147 (u8 *)&ib_attr->sys_image_guid);
148	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
149	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
150
151 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
152 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
153 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
154 ib_attr->max_qp = dev_attr->max_qp;
155 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
156 ib_attr->device_cap_flags =
157 IB_DEVICE_CURR_QP_STATE_MOD
158 | IB_DEVICE_RC_RNR_NAK_GEN
159 | IB_DEVICE_SHUTDOWN_PORT
160 | IB_DEVICE_SYS_IMAGE_GUID
161 | IB_DEVICE_LOCAL_DMA_LKEY
162 | IB_DEVICE_RESIZE_MAX_WR
163 | IB_DEVICE_PORT_ACTIVE_EVENT
164 | IB_DEVICE_N_NOTIFY_CQ
165 | IB_DEVICE_MEM_WINDOW
166 | IB_DEVICE_MEM_WINDOW_TYPE_2B
167 | IB_DEVICE_MEM_MGT_EXTENSIONS;
168 ib_attr->max_sge = dev_attr->max_qp_sges;
169 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
170 ib_attr->max_cq = dev_attr->max_cq;
171 ib_attr->max_cqe = dev_attr->max_cq_wqes;
172 ib_attr->max_mr = dev_attr->max_mr;
173 ib_attr->max_pd = dev_attr->max_pd;
174 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
175	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
176	if (dev_attr->is_atomic) {
177		ib_attr->atomic_cap = IB_ATOMIC_HCA;
178		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
179	}
180
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
184 ib_attr->max_ee = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
193
194	ib_attr->max_fmr = 0;
195	ib_attr->max_map_per_fmr = 0;
196
197 ib_attr->max_srq = dev_attr->max_srq;
198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
200
201 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
202
203 ib_attr->max_pkeys = 1;
204	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
205	return 0;
206}
207
208int bnxt_re_modify_device(struct ib_device *ibdev,
209 int device_modify_mask,
210 struct ib_device_modify *device_modify)
211{
212 switch (device_modify_mask) {
213 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
214 /* Modify the GUID requires the modification of the GID table */
215 /* GUID should be made as READ-ONLY */
216 break;
217 case IB_DEVICE_MODIFY_NODE_DESC:
218 /* Node Desc should be made as READ-ONLY */
219 break;
220 default:
221 break;
222 }
223 return 0;
224}
225
226/* Port */
227int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
228 struct ib_port_attr *port_attr)
229{
230 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
231 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
232
233 memset(port_attr, 0, sizeof(*port_attr));
234
235 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
236 port_attr->state = IB_PORT_ACTIVE;
237 port_attr->phys_state = 5;
238 } else {
239 port_attr->state = IB_PORT_DOWN;
240 port_attr->phys_state = 3;
241 }
242 port_attr->max_mtu = IB_MTU_4096;
243 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
244 port_attr->gid_tbl_len = dev_attr->max_sgid;
245 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
246 IB_PORT_DEVICE_MGMT_SUP |
247 IB_PORT_VENDOR_CLASS_SUP |
248 IB_PORT_IP_BASED_GIDS;
249
250 /* Max MSG size set to 2G for now */
251 port_attr->max_msg_sz = 0x80000000;
252 port_attr->bad_pkey_cntr = 0;
253 port_attr->qkey_viol_cntr = 0;
254 port_attr->pkey_tbl_len = dev_attr->max_pkey;
255 port_attr->lid = 0;
256 port_attr->sm_lid = 0;
257 port_attr->lmc = 0;
258 port_attr->max_vl_num = 4;
259 port_attr->sm_sl = 0;
260 port_attr->subnet_timeout = 0;
261 port_attr->init_type_reply = 0;
262	port_attr->active_speed = rdev->active_speed;
263	port_attr->active_width = rdev->active_width;
264
265	return 0;
266}
267
268int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
269 struct ib_port_immutable *immutable)
270{
271 struct ib_port_attr port_attr;
272
273 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
274 return -EINVAL;
275
276 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
277 immutable->gid_tbl_len = port_attr.gid_tbl_len;
278 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
279 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
280 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
281 return 0;
282}
283
284int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
285 u16 index, u16 *pkey)
286{
287 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
288
289 /* Ignore port_num */
290
291 memset(pkey, 0, sizeof(*pkey));
292 return bnxt_qplib_get_pkey(&rdev->qplib_res,
293 &rdev->qplib_res.pkey_tbl, index, pkey);
294}
295
296int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
297 int index, union ib_gid *gid)
298{
299 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
300 int rc = 0;
301
302 /* Ignore port_num */
303 memset(gid, 0, sizeof(*gid));
304 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
305 &rdev->qplib_res.sgid_tbl, index,
306 (struct bnxt_qplib_gid *)gid);
307 return rc;
308}
309
310int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
311 unsigned int index, void **context)
312{
313 int rc = 0;
314 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
315 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
316 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
317
318 /* Delete the entry from the hardware */
319 ctx = *context;
320 if (!ctx)
321 return -EINVAL;
322
323 if (sgid_tbl && sgid_tbl->active) {
324 if (ctx->idx >= sgid_tbl->max)
325 return -EINVAL;
326 ctx->refcnt--;
327 if (!ctx->refcnt) {
328			rc = bnxt_qplib_del_sgid(sgid_tbl,
329						 &sgid_tbl->tbl[ctx->idx],
330						 true);
331			if (rc) {
332				dev_err(rdev_to_dev(rdev),
333					"Failed to remove GID: %#x", rc);
334			} else {
335				ctx_tbl = sgid_tbl->ctx;
336				ctx_tbl[ctx->idx] = NULL;
337				kfree(ctx);
338			}
339		}
340 } else {
341 return -EINVAL;
342 }
343 return rc;
344}
345
346int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
347 unsigned int index, const union ib_gid *gid,
348 const struct ib_gid_attr *attr, void **context)
349{
350 int rc;
351 u32 tbl_idx = 0;
352 u16 vlan_id = 0xFFFF;
353 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
354 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
355 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
356
357 if ((attr->ndev) && is_vlan_dev(attr->ndev))
358 vlan_id = vlan_dev_vlan_id(attr->ndev);
359
360 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
361 rdev->qplib_res.netdev->dev_addr,
362 vlan_id, true, &tbl_idx);
363 if (rc == -EALREADY) {
364 ctx_tbl = sgid_tbl->ctx;
365 ctx_tbl[tbl_idx]->refcnt++;
366 *context = ctx_tbl[tbl_idx];
367 return 0;
368 }
369
370 if (rc < 0) {
371 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
372 return rc;
373 }
374
375 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
376 if (!ctx)
377 return -ENOMEM;
378 ctx_tbl = sgid_tbl->ctx;
379 ctx->idx = tbl_idx;
380 ctx->refcnt = 1;
381 ctx_tbl[tbl_idx] = ctx;
382
383 return rc;
384}
385
386enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
387 u8 port_num)
388{
389 return IB_LINK_LAYER_ETHERNET;
390}
391
392#define BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
393
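/*
 * Memory-window fence support: bnxt_re_create_fence_wqe() pre-builds a
 * type-1 BIND work request over the PD's fence MR, and
 * bnxt_re_bind_fence_mw() posts a copy of it with a freshly incremented
 * rkey whenever a fence has to be enforced on a QP belonging to this PD.
 */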
394static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
395{
396 struct bnxt_re_fence_data *fence = &pd->fence;
397 struct ib_mr *ib_mr = &fence->mr->ib_mr;
398 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
399
400 memset(wqe, 0, sizeof(*wqe));
401 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
402 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
403 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
404 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
405 wqe->bind.zero_based = false;
406 wqe->bind.parent_l_key = ib_mr->lkey;
407 wqe->bind.va = (u64)(unsigned long)fence->va;
408 wqe->bind.length = fence->size;
409 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
410 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
411
412 /* Save the initial rkey in fence structure for now;
413 * wqe->bind.r_key will be set at (re)bind time.
414 */
415 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
416}
417
418static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
419{
420 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
421 qplib_qp);
422 struct ib_pd *ib_pd = qp->ib_qp.pd;
423 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
424 struct bnxt_re_fence_data *fence = &pd->fence;
425 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
426 struct bnxt_qplib_swqe wqe;
427 int rc;
428
429 memcpy(&wqe, fence_wqe, sizeof(wqe));
430 wqe.bind.r_key = fence->bind_rkey;
431 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
432
433 dev_dbg(rdev_to_dev(qp->rdev),
434 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
435 wqe.bind.r_key, qp->qplib_qp.id, pd);
436 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
437 if (rc) {
438 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
439 return rc;
440 }
441 bnxt_qplib_post_send_db(&qp->qplib_qp);
442
443 return rc;
444}
445
446static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
447{
448 struct bnxt_re_fence_data *fence = &pd->fence;
449 struct bnxt_re_dev *rdev = pd->rdev;
450 struct device *dev = &rdev->en_dev->pdev->dev;
451 struct bnxt_re_mr *mr = fence->mr;
452
453 if (fence->mw) {
454 bnxt_re_dealloc_mw(fence->mw);
455 fence->mw = NULL;
456 }
457 if (mr) {
458 if (mr->ib_mr.rkey)
459 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
460 true);
461 if (mr->ib_mr.lkey)
462 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
463 kfree(mr);
464 fence->mr = NULL;
465 }
466 if (fence->dma_addr) {
467 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
468 DMA_BIDIRECTIONAL);
469 fence->dma_addr = 0;
470 }
471}
472
473static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
474{
475 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
476 struct bnxt_re_fence_data *fence = &pd->fence;
477 struct bnxt_re_dev *rdev = pd->rdev;
478 struct device *dev = &rdev->en_dev->pdev->dev;
479 struct bnxt_re_mr *mr = NULL;
480 dma_addr_t dma_addr = 0;
481 struct ib_mw *mw;
482 u64 pbl_tbl;
483 int rc;
484
485 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
486 DMA_BIDIRECTIONAL);
487 rc = dma_mapping_error(dev, dma_addr);
488 if (rc) {
489 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
490 rc = -EIO;
491 fence->dma_addr = 0;
492 goto fail;
493 }
494 fence->dma_addr = dma_addr;
495
496 /* Allocate a MR */
497 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
498 if (!mr) {
499 rc = -ENOMEM;
500 goto fail;
501 }
502 fence->mr = mr;
503 mr->rdev = rdev;
504 mr->qplib_mr.pd = &pd->qplib_pd;
505 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
506 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
507 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
508 if (rc) {
509 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
510 goto fail;
511 }
512
513 /* Register MR */
514 mr->ib_mr.lkey = mr->qplib_mr.lkey;
515 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
516 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
517 pbl_tbl = dma_addr;
518 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
519 BNXT_RE_FENCE_PBL_SIZE, false);
520 if (rc) {
521 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
522 goto fail;
523 }
524 mr->ib_mr.rkey = mr->qplib_mr.rkey;
525
526 /* Create a fence MW only for kernel consumers */
527 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
528	if (IS_ERR(mw)) {
529		dev_err(rdev_to_dev(rdev),
530			"Failed to create fence-MW for PD: %p\n", pd);
531		rc = PTR_ERR(mw);
532		goto fail;
533 }
534 fence->mw = mw;
535
536 bnxt_re_create_fence_wqe(pd);
537 return 0;
538
539fail:
540 bnxt_re_destroy_fence_mr(pd);
541 return rc;
542}
543
544/* Protection Domains */
545int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
546{
547 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
548 struct bnxt_re_dev *rdev = pd->rdev;
549 int rc;
550
551	bnxt_re_destroy_fence_mr(pd);
552
553	if (pd->qplib_pd.id) {
554		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
555					   &rdev->qplib_res.pd_tbl,
556					   &pd->qplib_pd);
557		if (rc)
558			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
559	}
560
561 kfree(pd);
562 return 0;
563}
564
565struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
566 struct ib_ucontext *ucontext,
567 struct ib_udata *udata)
568{
569 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
570 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
571 struct bnxt_re_ucontext,
572 ib_uctx);
573 struct bnxt_re_pd *pd;
574 int rc;
575
576 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
577 if (!pd)
578 return ERR_PTR(-ENOMEM);
579
580 pd->rdev = rdev;
581 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
582 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
583 rc = -ENOMEM;
584 goto fail;
585 }
586
587 if (udata) {
588 struct bnxt_re_pd_resp resp;
589
590		if (!ucntx->dpi.dbr) {
591			/* Allocate DPI in alloc_pd to avoid failing of
592			 * ibv_devinfo and family of application when DPIs
593			 * are depleted.
594			 */
595			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
596						 &ucntx->dpi, ucntx)) {
597				rc = -ENOMEM;
598				goto dbfail;
599			}
600		}
601
602		resp.pdid = pd->qplib_pd.id;
603		/* Still allow mapping this DBR to the new user PD. */
604		resp.dpi = ucntx->dpi.dpi;
605		resp.dbr = (u64)ucntx->dpi.umdbr;
606
607 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
608 if (rc) {
609 dev_err(rdev_to_dev(rdev),
610 "Failed to copy user response\n");
611 goto dbfail;
612 }
613 }
614
615	if (!udata)
616		if (bnxt_re_create_fence_mr(pd))
617			dev_warn(rdev_to_dev(rdev),
618				 "Failed to create Fence-MR\n");
619	return &pd->ib_pd;
620dbfail:
621 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
622 &pd->qplib_pd);
623fail:
624 kfree(pd);
625 return ERR_PTR(rc);
626}
627
628/* Address Handles */
629int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
630{
631 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
632 struct bnxt_re_dev *rdev = ah->rdev;
633 int rc;
634
635 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
636 if (rc) {
637 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
638 return rc;
639 }
640 kfree(ah);
641 return 0;
642}
643
644struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
645				struct rdma_ah_attr *ah_attr,
646				struct ib_udata *udata)
647{
648 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
649 struct bnxt_re_dev *rdev = pd->rdev;
650 struct bnxt_re_ah *ah;
651	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
652	int rc;
653 u16 vlan_tag;
654 u8 nw_type;
655
656 struct ib_gid_attr sgid_attr;
657
658	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
659		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
660 return ERR_PTR(-EINVAL);
661 }
662 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
663 if (!ah)
664 return ERR_PTR(-ENOMEM);
665
666 ah->rdev = rdev;
667 ah->qplib_ah.pd = &pd->qplib_pd;
668
669 /* Supply the configuration for the HW */
670	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
671	       sizeof(union ib_gid));
672	/*
673	 * If RoCE V2 is enabled, stack will have two entries for
674	 * each GID entry. Avoiding this duplicate entry in HW. Dividing
675	 * the GID index by 2 for RoCE V2
676	 */
677	ah->qplib_ah.sgid_index = grh->sgid_index / 2;
678 ah->qplib_ah.host_sgid_index = grh->sgid_index;
679 ah->qplib_ah.traffic_class = grh->traffic_class;
680 ah->qplib_ah.flow_label = grh->flow_label;
681 ah->qplib_ah.hop_limit = grh->hop_limit;
682 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
683	if (ib_pd->uobject &&
684	    !rdma_is_multicast_addr((struct in6_addr *)
685				    grh->dgid.raw) &&
686	    !rdma_link_local_addr((struct in6_addr *)
687				  grh->dgid.raw)) {
688		union ib_gid sgid;
689
690		rc = ib_get_cached_gid(&rdev->ibdev, 1,
691				       grh->sgid_index, &sgid,
692				       &sgid_attr);
693		if (rc) {
694			dev_err(rdev_to_dev(rdev),
695				"Failed to query gid at index %d",
696				grh->sgid_index);
697			goto fail;
698 }
699 if (sgid_attr.ndev) {
700 if (is_vlan_dev(sgid_attr.ndev))
701 vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
702 dev_put(sgid_attr.ndev);
703 }
704 /* Get network header type for this GID */
705 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
706 switch (nw_type) {
707 case RDMA_NETWORK_IPV4:
708 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
709 break;
710 case RDMA_NETWORK_IPV6:
711 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
712 break;
713 default:
714 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
715 break;
716 }
717		rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
718						  ah_attr->roce.dmac, &vlan_tag,
719						  &sgid_attr.ndev->ifindex,
720 NULL);
721 if (rc) {
722 dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
723 goto fail;
724 }
725 }
726
727	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
728	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
729 if (rc) {
730 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
731 goto fail;
732 }
733
734 /* Write AVID to shared page. */
735 if (ib_pd->uobject) {
736 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
737 struct bnxt_re_ucontext *uctx;
738 unsigned long flag;
739 u32 *wrptr;
740
741 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
742 spin_lock_irqsave(&uctx->sh_lock, flag);
743 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
744 *wrptr = ah->qplib_ah.id;
745 wmb(); /* make sure cache is updated. */
746 spin_unlock_irqrestore(&uctx->sh_lock, flag);
747 }
748
749 return &ah->ib_ah;
750
751fail:
752 kfree(ah);
753 return ERR_PTR(rc);
754}
755
756int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
757{
758 return 0;
759}
760
761int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
762{
763 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
764
765	ah_attr->type = ib_ah->type;
766	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
767	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
768	rdma_ah_set_grh(ah_attr, NULL, 0,
769 ah->qplib_ah.host_sgid_index,
770 0, ah->qplib_ah.traffic_class);
771 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
772 rdma_ah_set_port_num(ah_attr, 1);
773 rdma_ah_set_static_rate(ah_attr, 0);
774	return 0;
775}
776
777/* Queue Pairs */
778int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
779{
780 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
781 struct bnxt_re_dev *rdev = qp->rdev;
782 int rc;
783
784	bnxt_qplib_del_flush_qp(&qp->qplib_qp);
785	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
786 if (rc) {
787 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
788 return rc;
789 }
790 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
791 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
792 &rdev->sqp_ah->qplib_ah);
793 if (rc) {
794 dev_err(rdev_to_dev(rdev),
795 "Failed to destroy HW AH for shadow QP");
796 return rc;
797 }
798
799		bnxt_qplib_del_flush_qp(&qp->qplib_qp);
800		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
801 &rdev->qp1_sqp->qplib_qp);
802 if (rc) {
803 dev_err(rdev_to_dev(rdev),
804 "Failed to destroy Shadow QP");
805 return rc;
806 }
807 mutex_lock(&rdev->qp_lock);
808 list_del(&rdev->qp1_sqp->list);
809 atomic_dec(&rdev->qp_count);
810 mutex_unlock(&rdev->qp_lock);
811
812 kfree(rdev->sqp_ah);
813 kfree(rdev->qp1_sqp);
814 }
815
816	if (!IS_ERR_OR_NULL(qp->rumem))
817		ib_umem_release(qp->rumem);
818	if (!IS_ERR_OR_NULL(qp->sumem))
819		ib_umem_release(qp->sumem);
820
821 mutex_lock(&rdev->qp_lock);
822 list_del(&qp->list);
823 atomic_dec(&rdev->qp_count);
824 mutex_unlock(&rdev->qp_lock);
825 kfree(qp);
826 return 0;
827}
828
829static u8 __from_ib_qp_type(enum ib_qp_type type)
830{
831 switch (type) {
832 case IB_QPT_GSI:
833 return CMDQ_CREATE_QP1_TYPE_GSI;
834 case IB_QPT_RC:
835 return CMDQ_CREATE_QP_TYPE_RC;
836 case IB_QPT_UD:
837 return CMDQ_CREATE_QP_TYPE_UD;
838 default:
839 return IB_QPT_MAX;
840 }
841}
842
843static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
844 struct bnxt_re_qp *qp, struct ib_udata *udata)
845{
846 struct bnxt_re_qp_req ureq;
847 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
848 struct ib_umem *umem;
849 int bytes = 0;
850 struct ib_ucontext *context = pd->ib_pd.uobject->context;
851 struct bnxt_re_ucontext *cntx = container_of(context,
852 struct bnxt_re_ucontext,
853 ib_uctx);
854 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
855 return -EFAULT;
856
857 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
858 /* Consider mapping PSN search memory only for RC QPs. */
859 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
860 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
861 bytes = PAGE_ALIGN(bytes);
862 umem = ib_umem_get(context, ureq.qpsva, bytes,
863 IB_ACCESS_LOCAL_WRITE, 1);
864 if (IS_ERR(umem))
865 return PTR_ERR(umem);
866
867 qp->sumem = umem;
868 qplib_qp->sq.sglist = umem->sg_head.sgl;
869 qplib_qp->sq.nmap = umem->nmap;
870 qplib_qp->qp_handle = ureq.qp_handle;
871
872 if (!qp->qplib_qp.srq) {
873 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
874 bytes = PAGE_ALIGN(bytes);
875 umem = ib_umem_get(context, ureq.qprva, bytes,
876 IB_ACCESS_LOCAL_WRITE, 1);
877 if (IS_ERR(umem))
878 goto rqfail;
879 qp->rumem = umem;
880 qplib_qp->rq.sglist = umem->sg_head.sgl;
881 qplib_qp->rq.nmap = umem->nmap;
882 }
883
884	qplib_qp->dpi = &cntx->dpi;
885	return 0;
886rqfail:
887 ib_umem_release(qp->sumem);
888 qp->sumem = NULL;
889 qplib_qp->sq.sglist = NULL;
890 qplib_qp->sq.nmap = 0;
891
892 return PTR_ERR(umem);
893}
894
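/*
 * GSI (QP1) traffic is proxied through an internal "shadow" UD QP. The two
 * helpers below create the address handle (DGID and DMAC taken from the
 * local port) and the shadow QP itself, sized to match the real QP1's
 * receive queue.
 */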
895static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
896 (struct bnxt_re_pd *pd,
897 struct bnxt_qplib_res *qp1_res,
898 struct bnxt_qplib_qp *qp1_qp)
899{
900 struct bnxt_re_dev *rdev = pd->rdev;
901 struct bnxt_re_ah *ah;
902 union ib_gid sgid;
903 int rc;
904
905 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
906 if (!ah)
907 return NULL;
908
909	ah->rdev = rdev;
910 ah->qplib_ah.pd = &pd->qplib_pd;
911
912 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
913 if (rc)
914 goto fail;
915
916 /* supply the dgid data same as sgid */
917 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
918 sizeof(union ib_gid));
919 ah->qplib_ah.sgid_index = 0;
920
921 ah->qplib_ah.traffic_class = 0;
922 ah->qplib_ah.flow_label = 0;
923 ah->qplib_ah.hop_limit = 1;
924 ah->qplib_ah.sl = 0;
925 /* Have DMAC same as SMAC */
926 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
927
928 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
929 if (rc) {
930 dev_err(rdev_to_dev(rdev),
931 "Failed to allocate HW AH for Shadow QP");
932 goto fail;
933 }
934
935 return ah;
936
937fail:
938 kfree(ah);
939 return NULL;
940}
941
942static struct bnxt_re_qp *bnxt_re_create_shadow_qp
943 (struct bnxt_re_pd *pd,
944 struct bnxt_qplib_res *qp1_res,
945 struct bnxt_qplib_qp *qp1_qp)
946{
947 struct bnxt_re_dev *rdev = pd->rdev;
948 struct bnxt_re_qp *qp;
949 int rc;
950
951 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
952 if (!qp)
953 return NULL;
954
955	qp->rdev = rdev;
956
957 /* Initialize the shadow QP structure from the QP1 values */
958 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
959
960 qp->qplib_qp.pd = &pd->qplib_pd;
961 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
962 qp->qplib_qp.type = IB_QPT_UD;
963
964 qp->qplib_qp.max_inline_data = 0;
965 qp->qplib_qp.sig_type = true;
966
967 /* Shadow QP SQ depth should be same as QP1 RQ depth */
968 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
969 qp->qplib_qp.sq.max_sge = 2;
970	/* Q full delta can be 1 since it is internal QP */
971	qp->qplib_qp.sq.q_full_delta = 1;
972
973 qp->qplib_qp.scq = qp1_qp->scq;
974 qp->qplib_qp.rcq = qp1_qp->rcq;
975
976 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
977 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
978	/* Q full delta can be 1 since it is internal QP */
979	qp->qplib_qp.rq.q_full_delta = 1;
980
981 qp->qplib_qp.mtu = qp1_qp->mtu;
982
983 qp->qplib_qp.sq_hdr_buf_size = 0;
984 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
985 qp->qplib_qp.dpi = &rdev->dpi_privileged;
986
987 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
988 if (rc)
989 goto fail;
990
991 rdev->sqp_id = qp->qplib_qp.id;
992
993 spin_lock_init(&qp->sq_lock);
994 INIT_LIST_HEAD(&qp->list);
995 mutex_lock(&rdev->qp_lock);
996 list_add_tail(&qp->list, &rdev->qp_list);
997 atomic_inc(&rdev->qp_count);
998 mutex_unlock(&rdev->qp_lock);
999 return qp;
1000fail:
1001 kfree(qp);
1002 return NULL;
1003}
1004
1005struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1006 struct ib_qp_init_attr *qp_init_attr,
1007 struct ib_udata *udata)
1008{
1009 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1010 struct bnxt_re_dev *rdev = pd->rdev;
1011 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1012 struct bnxt_re_qp *qp;
1013 struct bnxt_re_cq *cq;
1014 int rc, entries;
1015
1016 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1017 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1018 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1019 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1020 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1021 return ERR_PTR(-EINVAL);
1022
1023 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1024 if (!qp)
1025 return ERR_PTR(-ENOMEM);
1026
1027 qp->rdev = rdev;
1028 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1029 qp->qplib_qp.pd = &pd->qplib_pd;
1030 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1031 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1032 if (qp->qplib_qp.type == IB_QPT_MAX) {
1033 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1034 qp->qplib_qp.type);
1035 rc = -EINVAL;
1036 goto fail;
1037 }
1038 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1039 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1040 IB_SIGNAL_ALL_WR) ? true : false);
1041
1042	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1043 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1044 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1045
1046 if (qp_init_attr->send_cq) {
1047 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1048 ib_cq);
1049 if (!cq) {
1050 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1051 rc = -EINVAL;
1052 goto fail;
1053 }
1054 qp->qplib_qp.scq = &cq->qplib_cq;
1055 }
1056
1057 if (qp_init_attr->recv_cq) {
1058 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1059 ib_cq);
1060 if (!cq) {
1061 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1062 rc = -EINVAL;
1063 goto fail;
1064 }
1065 qp->qplib_qp.rcq = &cq->qplib_cq;
1066 }
1067
1068 if (qp_init_attr->srq) {
1069 dev_err(rdev_to_dev(rdev), "SRQ not supported");
1070 rc = -ENOTSUPP;
1071 goto fail;
1072 } else {
1073 /* Allocate 1 more than what's provided so posting max doesn't
1074 * mean empty
1075 */
1076 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1077 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1078 dev_attr->max_qp_wqes + 1);
1079
1080		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1081					       qp_init_attr->cap.max_recv_wr;
1082
1083		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1084 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1085 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1086 }
1087
1088 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1089
1090 if (qp_init_attr->qp_type == IB_QPT_GSI) {
1091		/* Allocate 1 more than what's provided */
1092		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1093		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1094						dev_attr->max_qp_wqes + 1);
1095		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1096					       qp_init_attr->cap.max_send_wr;
1097		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1098 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1099 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1100 qp->qplib_qp.sq.max_sge++;
1101 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1102 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1103
1104 qp->qplib_qp.rq_hdr_buf_size =
1105 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1106
1107 qp->qplib_qp.sq_hdr_buf_size =
1108 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1109 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1110 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1111 if (rc) {
1112 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1113 goto fail;
1114 }
1115 /* Create a shadow QP to handle the QP1 traffic */
1116 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1117 &qp->qplib_qp);
1118 if (!rdev->qp1_sqp) {
1119 rc = -EINVAL;
1120 dev_err(rdev_to_dev(rdev),
1121 "Failed to create Shadow QP for QP1");
1122 goto qp_destroy;
1123 }
1124 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1125 &qp->qplib_qp);
1126 if (!rdev->sqp_ah) {
1127 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1128 &rdev->qp1_sqp->qplib_qp);
1129 rc = -EINVAL;
1130 dev_err(rdev_to_dev(rdev),
1131 "Failed to create AH entry for ShadowQP");
1132 goto qp_destroy;
1133 }
1134
1135 } else {
1136		/* Allocate 128 + 1 more than what's provided */
1137 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1138 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1139 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1140 dev_attr->max_qp_wqes +
1141 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1142 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1143
1144 /*
1145 * Reserving one slot for Phantom WQE. Application can
1146 * post one extra entry in this case. But allowing this to avoid
1147 * unexpected Queue full condition
1148 */
1149
1150 qp->qplib_qp.sq.q_full_delta -= 1;
1151
1152		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1153 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1154 if (udata) {
1155 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1156 if (rc)
1157 goto fail;
1158 } else {
1159 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1160 }
1161
1162 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1163 if (rc) {
1164 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1165 goto fail;
1166 }
1167 }
1168
1169 qp->ib_qp.qp_num = qp->qplib_qp.id;
1170 spin_lock_init(&qp->sq_lock);
1171	spin_lock_init(&qp->rq_lock);
1172
1173 if (udata) {
1174 struct bnxt_re_qp_resp resp;
1175
1176 resp.qpid = qp->ib_qp.qp_num;
1177 resp.rsvd = 0;
1178 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1179 if (rc) {
1180 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1181 goto qp_destroy;
1182 }
1183 }
1184 INIT_LIST_HEAD(&qp->list);
1185 mutex_lock(&rdev->qp_lock);
1186 list_add_tail(&qp->list, &rdev->qp_list);
1187 atomic_inc(&rdev->qp_count);
1188 mutex_unlock(&rdev->qp_lock);
1189
1190 return &qp->ib_qp;
1191qp_destroy:
1192 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1193fail:
1194 kfree(qp);
1195 return ERR_PTR(rc);
1196}
1197
1198static u8 __from_ib_qp_state(enum ib_qp_state state)
1199{
1200 switch (state) {
1201 case IB_QPS_RESET:
1202 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1203 case IB_QPS_INIT:
1204 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1205 case IB_QPS_RTR:
1206 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1207 case IB_QPS_RTS:
1208 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1209 case IB_QPS_SQD:
1210 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1211 case IB_QPS_SQE:
1212 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1213 case IB_QPS_ERR:
1214 default:
1215 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1216 }
1217}
1218
1219static enum ib_qp_state __to_ib_qp_state(u8 state)
1220{
1221 switch (state) {
1222 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1223 return IB_QPS_RESET;
1224 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1225 return IB_QPS_INIT;
1226 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1227 return IB_QPS_RTR;
1228 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1229 return IB_QPS_RTS;
1230 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1231 return IB_QPS_SQD;
1232 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1233 return IB_QPS_SQE;
1234 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1235 default:
1236 return IB_QPS_ERR;
1237 }
1238}
1239
1240static u32 __from_ib_mtu(enum ib_mtu mtu)
1241{
1242 switch (mtu) {
1243 case IB_MTU_256:
1244 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1245 case IB_MTU_512:
1246 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1247 case IB_MTU_1024:
1248 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1249 case IB_MTU_2048:
1250 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1251 case IB_MTU_4096:
1252 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1253 default:
1254 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1255 }
1256}
1257
1258static enum ib_mtu __to_ib_mtu(u32 mtu)
1259{
1260 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1261 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1262 return IB_MTU_256;
1263 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1264 return IB_MTU_512;
1265 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1266 return IB_MTU_1024;
1267 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1268 return IB_MTU_2048;
1269 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1270 return IB_MTU_4096;
1271 default:
1272 return IB_MTU_2048;
1273 }
1274}
1275
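/*
 * Keep the shadow GSI QP in step with QP1: state, pkey index and SQ PSN
 * changes applied to QP1 are replayed on the shadow QP, which uses a
 * locally chosen qkey.
 */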
1276static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1277 struct bnxt_re_qp *qp1_qp,
1278 int qp_attr_mask)
1279{
1280 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1281 int rc = 0;
1282
1283 if (qp_attr_mask & IB_QP_STATE) {
1284 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1285 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1286 }
1287 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1288 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1289 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1290 }
1291
1292 if (qp_attr_mask & IB_QP_QKEY) {
1293 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1294 /* Using a Random QKEY */
1295 qp->qplib_qp.qkey = 0x81818181;
1296 }
1297 if (qp_attr_mask & IB_QP_SQ_PSN) {
1298 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1299 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1300 }
1301
1302 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1303 if (rc)
1304 dev_err(rdev_to_dev(rdev),
1305 "Failed to modify Shadow QP for QP1");
1306 return rc;
1307}
1308
1309int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1310 int qp_attr_mask, struct ib_udata *udata)
1311{
1312 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1313 struct bnxt_re_dev *rdev = qp->rdev;
1314 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1315 enum ib_qp_state curr_qp_state, new_qp_state;
1316 int rc, entries;
1317 int status;
1318 union ib_gid sgid;
1319 struct ib_gid_attr sgid_attr;
1320 u8 nw_type;
1321
1322 qp->qplib_qp.modify_flags = 0;
1323 if (qp_attr_mask & IB_QP_STATE) {
1324 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1325 new_qp_state = qp_attr->qp_state;
1326 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1327 ib_qp->qp_type, qp_attr_mask,
1328 IB_LINK_LAYER_ETHERNET)) {
1329 dev_err(rdev_to_dev(rdev),
1330 "Invalid attribute mask: %#x specified ",
1331 qp_attr_mask);
1332 dev_err(rdev_to_dev(rdev),
1333 "for qpn: %#x type: %#x",
1334 ib_qp->qp_num, ib_qp->qp_type);
1335 dev_err(rdev_to_dev(rdev),
1336 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1337 curr_qp_state, new_qp_state);
1338 return -EINVAL;
1339 }
1340 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1341 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1342
1343 if (!qp->sumem &&
1344 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1345 dev_dbg(rdev_to_dev(rdev),
1346 "Move QP = %p to flush list\n",
1347 qp);
1348 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1349 }
1350 if (!qp->sumem &&
1351 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1352 dev_dbg(rdev_to_dev(rdev),
1353 "Move QP = %p out of flush list\n",
1354 qp);
1355 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
1356 }
1357	}
1358 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1359 qp->qplib_qp.modify_flags |=
1360 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1361 qp->qplib_qp.en_sqd_async_notify = true;
1362 }
1363 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1364 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1365 qp->qplib_qp.access =
1366 __from_ib_access_flags(qp_attr->qp_access_flags);
1367 /* LOCAL_WRITE access must be set to allow RC receive */
1368 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1369 }
1370 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1371 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1372 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1373 }
1374 if (qp_attr_mask & IB_QP_QKEY) {
1375 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1376 qp->qplib_qp.qkey = qp_attr->qkey;
1377 }
1378 if (qp_attr_mask & IB_QP_AV) {
1379		const struct ib_global_route *grh =
1380			rdma_ah_read_grh(&qp_attr->ah_attr);
1381
1382		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1383 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1384 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1385 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1386 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1387 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1388 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1389		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1390		       sizeof(qp->qplib_qp.ah.dgid.data));
1391		qp->qplib_qp.ah.flow_label = grh->flow_label;
1392		/* If RoCE V2 is enabled, stack will have two entries for
1393		 * each GID entry. Avoiding this duplicate entry in HW. Dividing
1394		 * the GID index by 2 for RoCE V2
1395		 */
1396		qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1397 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1398 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1399 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1400 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1401		ether_addr_copy(qp->qplib_qp.ah.dmac,
1402				qp_attr->ah_attr.roce.dmac);
1403
1404		status = ib_get_cached_gid(&rdev->ibdev, 1,
1405					   grh->sgid_index,
1406					   &sgid, &sgid_attr);
1407 if (!status && sgid_attr.ndev) {
1408 memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1409 ETH_ALEN);
1410 dev_put(sgid_attr.ndev);
1411 nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1412 &sgid);
1413 switch (nw_type) {
1414 case RDMA_NETWORK_IPV4:
1415 qp->qplib_qp.nw_type =
1416 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1417 break;
1418 case RDMA_NETWORK_IPV6:
1419 qp->qplib_qp.nw_type =
1420 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1421 break;
1422 default:
1423 qp->qplib_qp.nw_type =
1424 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1425 break;
1426 }
1427 }
1428 }
1429
1430 if (qp_attr_mask & IB_QP_PATH_MTU) {
1431 qp->qplib_qp.modify_flags |=
1432 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1433 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1434		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1435	} else if (qp_attr->qp_state == IB_QPS_RTR) {
1436 qp->qplib_qp.modify_flags |=
1437 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1438 qp->qplib_qp.path_mtu =
1439 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1440		qp->qplib_qp.mtu =
1441			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1442	}
1443
1444 if (qp_attr_mask & IB_QP_TIMEOUT) {
1445 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1446 qp->qplib_qp.timeout = qp_attr->timeout;
1447 }
1448 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1449 qp->qplib_qp.modify_flags |=
1450 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1451 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1452 }
1453 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1454 qp->qplib_qp.modify_flags |=
1455 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1456 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1457 }
1458 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1459 qp->qplib_qp.modify_flags |=
1460 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1461 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1462 }
1463 if (qp_attr_mask & IB_QP_RQ_PSN) {
1464 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1465 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1466 }
1467 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1468 qp->qplib_qp.modify_flags |=
1469 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1470		/* Cap the max_rd_atomic to device max */
1471		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1472						   dev_attr->max_qp_rd_atom);
1473	}
1474 if (qp_attr_mask & IB_QP_SQ_PSN) {
1475 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1476 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1477 }
1478 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1479		if (qp_attr->max_dest_rd_atomic >
1480 dev_attr->max_qp_init_rd_atom) {
1481 dev_err(rdev_to_dev(rdev),
1482 "max_dest_rd_atomic requested%d is > dev_max%d",
1483 qp_attr->max_dest_rd_atomic,
1484 dev_attr->max_qp_init_rd_atom);
1485 return -EINVAL;
1486 }
1487
1488		qp->qplib_qp.modify_flags |=
1489 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1490 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1491 }
1492 if (qp_attr_mask & IB_QP_CAP) {
1493 qp->qplib_qp.modify_flags |=
1494 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1495 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1496 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1497 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1498 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1499 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1500 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1501 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1502 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1503 (qp_attr->cap.max_inline_data >=
1504 dev_attr->max_inline_data)) {
1505 dev_err(rdev_to_dev(rdev),
1506 "Create QP failed - max exceeded");
1507 return -EINVAL;
1508 }
1509 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1510 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1511 dev_attr->max_qp_wqes + 1);
1512		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1513 qp_attr->cap.max_send_wr;
1514 /*
1515 * Reserving one slot for Phantom WQE. Some application can
1516 * post one extra entry in this case. Allowing this to avoid
1517 * unexpected Queue full condition
1518 */
1519 qp->qplib_qp.sq.q_full_delta -= 1;
1520		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1521 if (qp->qplib_qp.rq.max_wqe) {
1522 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1523 qp->qplib_qp.rq.max_wqe =
1524 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1525			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1526						       qp_attr->cap.max_recv_wr;
1527			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1528 } else {
1529 /* SRQ was used prior, just ignore the RQ caps */
1530 }
1531 }
1532 if (qp_attr_mask & IB_QP_DEST_QPN) {
1533 qp->qplib_qp.modify_flags |=
1534 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1535 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1536 }
1537 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1538 if (rc) {
1539 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1540 return rc;
1541 }
1542 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1543 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1544 return rc;
1545}
1546
1547int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1548 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1549{
1550 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1551 struct bnxt_re_dev *rdev = qp->rdev;
1552	struct bnxt_qplib_qp *qplib_qp;
1553	int rc;
1554
1555	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1556	if (!qplib_qp)
1557		return -ENOMEM;
1558
1559	qplib_qp->id = qp->qplib_qp.id;
1560 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1561
1562 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1563	if (rc) {
1564		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1565		goto out;
1566	}
1567	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1568 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1569 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1570 qp_attr->pkey_index = qplib_qp->pkey_index;
1571 qp_attr->qkey = qplib_qp->qkey;
1572	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1573	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1574 qplib_qp->ah.host_sgid_index,
1575 qplib_qp->ah.hop_limit,
1576 qplib_qp->ah.traffic_class);
1577 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1578 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1579 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1580 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1581 qp_attr->timeout = qplib_qp->timeout;
1582 qp_attr->retry_cnt = qplib_qp->retry_cnt;
1583 qp_attr->rnr_retry = qplib_qp->rnr_retry;
1584 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1585 qp_attr->rq_psn = qplib_qp->rq.psn;
1586 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1587 qp_attr->sq_psn = qplib_qp->sq.psn;
1588 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1589 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1590 IB_SIGNAL_REQ_WR;
1591 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1592
1593 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1594 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1595 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1596 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1597 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1598 qp_init_attr->cap = qp_attr->cap;
1599
Leon Romanovskye13547b2017-09-19 13:22:13 +03001600out:
1601 kfree(qplib_qp);
1602 return rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001603}
1604
1605/* Routine for sending QP1 packets for RoCE V1 and V2
1606 */
1607static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1608 struct ib_send_wr *wr,
1609 struct bnxt_qplib_swqe *wqe,
1610 int payload_size)
1611{
1612 struct ib_device *ibdev = &qp->rdev->ibdev;
1613 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1614 ib_ah);
1615 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1616 struct bnxt_qplib_sge sge;
1617 union ib_gid sgid;
1618 u8 nw_type;
1619 u16 ether_type;
1620 struct ib_gid_attr sgid_attr;
1621 union ib_gid dgid;
1622 bool is_eth = false;
1623 bool is_vlan = false;
1624 bool is_grh = false;
1625 bool is_udp = false;
1626 u8 ip_version = 0;
1627 u16 vlan_id = 0xFFFF;
1628 void *buf;
1629 int i, rc = 0, size;
1630
1631 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1632
1633 rc = ib_get_cached_gid(ibdev, 1,
1634 qplib_ah->host_sgid_index, &sgid,
1635 &sgid_attr);
1636 if (rc) {
1637 dev_err(rdev_to_dev(qp->rdev),
1638 "Failed to query gid at index %d",
1639 qplib_ah->host_sgid_index);
1640 return rc;
1641 }
1642 if (sgid_attr.ndev) {
1643 if (is_vlan_dev(sgid_attr.ndev))
1644 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1645 dev_put(sgid_attr.ndev);
1646 }
1647 /* Get network header type for this GID */
1648 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1649 switch (nw_type) {
1650 case RDMA_NETWORK_IPV4:
1651 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1652 break;
1653 case RDMA_NETWORK_IPV6:
1654 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1655 break;
1656 default:
1657 nw_type = BNXT_RE_ROCE_V1_PACKET;
1658 break;
1659 }
1660 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1661 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
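	/* RoCEv2 GIDs use UDP encapsulation: pick the IP version from the
	 * SGID, otherwise fall back to a RoCE v1 GRH-only header.
	 */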
1662 if (is_udp) {
1663 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1664 ip_version = 4;
1665 ether_type = ETH_P_IP;
1666 } else {
1667 ip_version = 6;
1668 ether_type = ETH_P_IPV6;
1669 }
1670 is_grh = false;
1671 } else {
1672 ether_type = ETH_P_IBOE;
1673 is_grh = true;
1674 }
1675
1676 is_eth = true;
1677 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1678
1679 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1680 ip_version, is_udp, 0, &qp->qp1_hdr);
1681
1682 /* ETH */
1683 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1684 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1685
1686 /* For vlan, check the sgid for vlan existence */
1687
1688 if (!is_vlan) {
1689 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1690 } else {
1691 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1692 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1693 }
1694
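	/* The GRH shares its layout with the IPv6 header, so the same fields
	 * serve both RoCE v1 (GRH) and RoCE v2 over IPv6.
	 */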
1695 if (is_grh || (ip_version == 6)) {
1696 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1697 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1698 sizeof(sgid));
1699 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1700 }
1701
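	/* RoCE v2 over IPv4: build the IPv4 header; the TTL reuses the AH hop
	 * limit and the checksum is computed over the packed header.
	 */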
1702 if (ip_version == 4) {
1703 qp->qp1_hdr.ip4.tos = 0;
1704 qp->qp1_hdr.ip4.id = 0;
1705 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1706 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1707
1708 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1709 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1710 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1711 }
1712
1713 if (is_udp) {
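		/* RoCE v2 uses the well-known UDP destination port; a fixed
		 * source port is used here for QP1 traffic.
		 */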
1714 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1715 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1716 qp->qp1_hdr.udp.csum = 0;
1717 }
1718
1719 /* BTH */
1720 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1721 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1722 qp->qp1_hdr.immediate_present = 1;
1723 } else {
1724 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1725 }
1726 if (wr->send_flags & IB_SEND_SOLICITED)
1727 qp->qp1_hdr.bth.solicited_event = 1;
1728	/* Pad the payload to a 4-byte boundary; BTH pad_count carries the pad length */
1729 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1730
1731 /* P_key for QP1 is for all members */
1732 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1733 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1734 qp->qp1_hdr.bth.ack_req = 0;
1735 qp->send_psn++;
1736 qp->send_psn &= BTH_PSN_MASK;
1737 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1738 /* DETH */
1739	/* Use the privileged Q_Key for QP1 */
1740 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1741 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1742
1743 /* Pack the QP1 to the transmit buffer */
1744 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1745 if (buf) {
1746 size = ib_ud_header_pack(&qp->qp1_hdr, buf);
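		/* Shift the ULP SGEs up by one slot; slot 0 will carry the
		 * packed QP1 headers built above.
		 */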
1747 for (i = wqe->num_sge; i; i--) {
1748 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1749 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1750 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1751 }
1752
1753 /*
1754		 * The max header buf size for IPv6 RoCE V2 is 86,
1755		 * which is the same as the QP1 SQ header buffer size.
1756		 * The header buf size for IPv4 RoCE V2 can be 66:
1757		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
1758		 * Subtract 20 bytes from the QP1 SQ header buf size.
1759 */
1760 if (is_udp && ip_version == 4)
1761 sge.size -= 20;
1762 /*
1763 * Max Header buf size for RoCE V1 is 78.
1764 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1765 * Subtract 8 bytes from QP1 SQ header buf size
1766 */
1767 if (!is_udp)
1768 sge.size -= 8;
1769
1770 /* Subtract 4 bytes for non vlan packets */
1771 if (!is_vlan)
1772 sge.size -= 4;
1773
1774 wqe->sg_list[0].addr = sge.addr;
1775 wqe->sg_list[0].lkey = sge.lkey;
1776 wqe->sg_list[0].size = sge.size;
1777 wqe->num_sge++;
1778
1779 } else {
1780 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1781 rc = -ENOMEM;
1782 }
1783 return rc;
1784}
1785
1786/* The MAD layer only provides a recv SGE sized for ib_grh plus the MAD
1787 * datagram. No Ethernet headers, Ethertype, BTH, DETH, nor RoCE iCRC
1788 * are included. The Cu+ solution must provide a buffer for the entire
1789 * receive packet (334 bytes) with no VLAN and then copy the GRH
1790 * and the MAD datagram out to the provided SGE.
1791 */
1792static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1793 struct ib_recv_wr *wr,
1794 struct bnxt_qplib_swqe *wqe,
1795 int payload_size)
1796{
1797 struct bnxt_qplib_sge ref, sge;
1798 u32 rq_prod_index;
1799 struct bnxt_re_sqp_entries *sqp_entry;
1800
1801 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1802
1803 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1804 return -ENOMEM;
1805
1806 /* Create 1 SGE to receive the entire
1807 * ethernet packet
1808 */
1809 /* Save the reference from ULP */
1810 ref.addr = wqe->sg_list[0].addr;
1811 ref.lkey = wqe->sg_list[0].lkey;
1812 ref.size = wqe->sg_list[0].size;
1813
1814 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1815
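	/* Redirect the first SGE to the driver's QP1 header buffer; the ULP's
	 * original SGE is stashed in the shadow-QP table and is later used as
	 * the receive buffer when the packet is looped through the shadow QP.
	 */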
1816 /* SGE 1 */
1817 wqe->sg_list[0].addr = sge.addr;
1818 wqe->sg_list[0].lkey = sge.lkey;
1819 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1820 sge.size -= wqe->sg_list[0].size;
1821
1822 sqp_entry->sge.addr = ref.addr;
1823 sqp_entry->sge.lkey = ref.lkey;
1824 sqp_entry->sge.size = ref.size;
1825 /* Store the wrid for reporting completion */
1826 sqp_entry->wrid = wqe->wr_id;
1827	/* Change wqe->wr_id to the shadow-QP table index */
1828 wqe->wr_id = rq_prod_index;
1829 return 0;
1830}
1831
1832static int is_ud_qp(struct bnxt_re_qp *qp)
1833{
1834 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1835}
1836
1837static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1838 struct ib_send_wr *wr,
1839 struct bnxt_qplib_swqe *wqe)
1840{
1841 struct bnxt_re_ah *ah = NULL;
1842
1843 if (is_ud_qp(qp)) {
1844 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1845 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1846 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1847 wqe->send.avid = ah->qplib_ah.id;
1848 }
1849 switch (wr->opcode) {
1850 case IB_WR_SEND:
1851 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1852 break;
1853 case IB_WR_SEND_WITH_IMM:
1854 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1855 wqe->send.imm_data = wr->ex.imm_data;
1856 break;
1857 case IB_WR_SEND_WITH_INV:
1858 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1859 wqe->send.inv_key = wr->ex.invalidate_rkey;
1860 break;
1861 default:
1862 return -EINVAL;
1863 }
1864 if (wr->send_flags & IB_SEND_SIGNALED)
1865 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1866 if (wr->send_flags & IB_SEND_FENCE)
1867 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1868 if (wr->send_flags & IB_SEND_SOLICITED)
1869 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1870 if (wr->send_flags & IB_SEND_INLINE)
1871 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1872
1873 return 0;
1874}
1875
1876static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1877 struct bnxt_qplib_swqe *wqe)
1878{
1879 switch (wr->opcode) {
1880 case IB_WR_RDMA_WRITE:
1881 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1882 break;
1883 case IB_WR_RDMA_WRITE_WITH_IMM:
1884 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1885 wqe->rdma.imm_data = wr->ex.imm_data;
1886 break;
1887 case IB_WR_RDMA_READ:
1888 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1889 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1890 break;
1891 default:
1892 return -EINVAL;
1893 }
1894 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1895 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1896 if (wr->send_flags & IB_SEND_SIGNALED)
1897 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1898 if (wr->send_flags & IB_SEND_FENCE)
1899 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1900 if (wr->send_flags & IB_SEND_SOLICITED)
1901 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1902 if (wr->send_flags & IB_SEND_INLINE)
1903 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1904
1905 return 0;
1906}
1907
1908static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1909 struct bnxt_qplib_swqe *wqe)
1910{
1911 switch (wr->opcode) {
1912 case IB_WR_ATOMIC_CMP_AND_SWP:
1913 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
Devesh Sharma55311d02017-08-31 09:27:30 +05301914 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001915 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1916 break;
1917 case IB_WR_ATOMIC_FETCH_AND_ADD:
1918 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1919 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1920 break;
1921 default:
1922 return -EINVAL;
1923 }
1924 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1925 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1926 if (wr->send_flags & IB_SEND_SIGNALED)
1927 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1928 if (wr->send_flags & IB_SEND_FENCE)
1929 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1930 if (wr->send_flags & IB_SEND_SOLICITED)
1931 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1932 return 0;
1933}
1934
1935static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1936 struct bnxt_qplib_swqe *wqe)
1937{
1938 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1939 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1940
1941 if (wr->send_flags & IB_SEND_SIGNALED)
1942 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1943 if (wr->send_flags & IB_SEND_FENCE)
1944 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1945 if (wr->send_flags & IB_SEND_SOLICITED)
1946 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1947
1948 return 0;
1949}
1950
1951static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1952 struct bnxt_qplib_swqe *wqe)
1953{
1954 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1955 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1956 int access = wr->access;
1957
1958 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1959 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1960 wqe->frmr.page_list = mr->pages;
1961 wqe->frmr.page_list_len = mr->npages;
1962 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1963 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1964
1965 if (wr->wr.send_flags & IB_SEND_FENCE)
1966 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1967 if (wr->wr.send_flags & IB_SEND_SIGNALED)
1968 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1969
1970 if (access & IB_ACCESS_LOCAL_WRITE)
1971 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1972 if (access & IB_ACCESS_REMOTE_READ)
1973 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
1974 if (access & IB_ACCESS_REMOTE_WRITE)
1975 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
1976 if (access & IB_ACCESS_REMOTE_ATOMIC)
1977 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
1978 if (access & IB_ACCESS_MW_BIND)
1979 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
1980
1981 wqe->frmr.l_key = wr->key;
1982 wqe->frmr.length = wr->mr->length;
1983 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
1984 wqe->frmr.va = wr->mr->iova;
1985 return 0;
1986}
1987
1988static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
1989 struct ib_send_wr *wr,
1990 struct bnxt_qplib_swqe *wqe)
1991{
1992 /* Copy the inline data to the data field */
1993 u8 *in_data;
1994 u32 i, sge_len;
1995 void *sge_addr;
1996
1997 in_data = wqe->inline_data;
1998 for (i = 0; i < wr->num_sge; i++) {
1999 sge_addr = (void *)(unsigned long)
2000 wr->sg_list[i].addr;
2001 sge_len = wr->sg_list[i].length;
2002
2003 if ((sge_len + wqe->inline_len) >
2004 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2005 dev_err(rdev_to_dev(rdev),
2006 "Inline data size requested > supported value");
2007 return -EINVAL;
2008 }
2009 sge_len = wr->sg_list[i].length;
2010
2011 memcpy(in_data, sge_addr, sge_len);
2012 in_data += wr->sg_list[i].length;
2013 wqe->inline_len += wr->sg_list[i].length;
2014 }
2015 return wqe->inline_len;
2016}
2017
2018static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2019 struct ib_send_wr *wr,
2020 struct bnxt_qplib_swqe *wqe)
2021{
2022 int payload_sz = 0;
2023
2024 if (wr->send_flags & IB_SEND_INLINE)
2025 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2026 else
2027 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2028 wqe->num_sge);
2029
2030 return payload_sz;
2031}
2032
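/* Work around a HW stall on UD/GSI/raw-Ethertype QPs: once the count of
 * posted WQEs reaches BNXT_RE_UD_QP_HW_STALL, kick the QP with a
 * modify-to-RTS and reset the WQE counter.
 */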
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002033static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2034{
2035 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2036 qp->ib_qp.qp_type == IB_QPT_GSI ||
2037 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2038 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2039 int qp_attr_mask;
2040 struct ib_qp_attr qp_attr;
2041
2042 qp_attr_mask = IB_QP_STATE;
2043 qp_attr.qp_state = IB_QPS_RTS;
2044 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2045 qp->qplib_qp.wqe_cnt = 0;
2046 }
2047}
2048
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002049static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2050 struct bnxt_re_qp *qp,
2051 struct ib_send_wr *wr)
2052{
2053 struct bnxt_qplib_swqe wqe;
2054 int rc = 0, payload_sz = 0;
2055 unsigned long flags;
2056
2057 spin_lock_irqsave(&qp->sq_lock, flags);
2058 memset(&wqe, 0, sizeof(wqe));
2059 while (wr) {
2060		/* Housekeeping */
2061 memset(&wqe, 0, sizeof(wqe));
2062
2063 /* Common */
2064 wqe.num_sge = wr->num_sge;
2065 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2066 dev_err(rdev_to_dev(rdev),
2067 "Limit exceeded for Send SGEs");
2068 rc = -EINVAL;
2069 goto bad;
2070 }
2071
2072 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2073 if (payload_sz < 0) {
2074 rc = -EINVAL;
2075 goto bad;
2076 }
2077 wqe.wr_id = wr->wr_id;
2078
2079 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2080
2081 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2082 if (!rc)
2083 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2084bad:
2085 if (rc) {
2086 dev_err(rdev_to_dev(rdev),
2087 "Post send failed opcode = %#x rc = %d",
2088 wr->opcode, rc);
2089 break;
2090 }
2091 wr = wr->next;
2092 }
2093 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002094 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002095 spin_unlock_irqrestore(&qp->sq_lock, flags);
2096 return rc;
2097}
2098
2099int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2100 struct ib_send_wr **bad_wr)
2101{
2102 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2103 struct bnxt_qplib_swqe wqe;
2104 int rc = 0, payload_sz = 0;
2105 unsigned long flags;
2106
2107 spin_lock_irqsave(&qp->sq_lock, flags);
2108 while (wr) {
2109		/* Housekeeping */
2110 memset(&wqe, 0, sizeof(wqe));
2111
2112 /* Common */
2113 wqe.num_sge = wr->num_sge;
2114 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2115 dev_err(rdev_to_dev(qp->rdev),
2116 "Limit exceeded for Send SGEs");
2117 rc = -EINVAL;
2118 goto bad;
2119 }
2120
2121 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2122 if (payload_sz < 0) {
2123 rc = -EINVAL;
2124 goto bad;
2125 }
2126 wqe.wr_id = wr->wr_id;
2127
2128 switch (wr->opcode) {
2129 case IB_WR_SEND:
2130 case IB_WR_SEND_WITH_IMM:
2131 if (ib_qp->qp_type == IB_QPT_GSI) {
2132 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2133 payload_sz);
2134 if (rc)
2135 goto bad;
2136 wqe.rawqp1.lflags |=
2137 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2138 }
2139 switch (wr->send_flags) {
2140 case IB_SEND_IP_CSUM:
2141 wqe.rawqp1.lflags |=
2142 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2143 break;
2144 default:
2145 break;
2146 }
2147		/* Fall through to build the wqe */
2148 case IB_WR_SEND_WITH_INV:
2149 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2150 break;
2151 case IB_WR_RDMA_WRITE:
2152 case IB_WR_RDMA_WRITE_WITH_IMM:
2153 case IB_WR_RDMA_READ:
2154 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2155 break;
2156 case IB_WR_ATOMIC_CMP_AND_SWP:
2157 case IB_WR_ATOMIC_FETCH_AND_ADD:
2158 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2159 break;
2160 case IB_WR_RDMA_READ_WITH_INV:
2161 dev_err(rdev_to_dev(qp->rdev),
2162 "RDMA Read with Invalidate is not supported");
2163 rc = -EINVAL;
2164 goto bad;
2165 case IB_WR_LOCAL_INV:
2166 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2167 break;
2168 case IB_WR_REG_MR:
2169 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2170 break;
2171 default:
2172 /* Unsupported WRs */
2173 dev_err(rdev_to_dev(qp->rdev),
2174 "WR (%#x) is not supported", wr->opcode);
2175 rc = -EINVAL;
2176 goto bad;
2177 }
2178 if (!rc)
2179 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2180bad:
2181 if (rc) {
2182 dev_err(rdev_to_dev(qp->rdev),
2183 "post_send failed op:%#x qps = %#x rc = %d\n",
2184 wr->opcode, qp->qplib_qp.state, rc);
2185 *bad_wr = wr;
2186 break;
2187 }
2188 wr = wr->next;
2189 }
2190 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002191 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002192 spin_unlock_irqrestore(&qp->sq_lock, flags);
2193
2194 return rc;
2195}
2196
2197static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2198 struct bnxt_re_qp *qp,
2199 struct ib_recv_wr *wr)
2200{
2201 struct bnxt_qplib_swqe wqe;
2202 int rc = 0, payload_sz = 0;
2203
2204 memset(&wqe, 0, sizeof(wqe));
2205 while (wr) {
2206		/* Housekeeping */
2207 memset(&wqe, 0, sizeof(wqe));
2208
2209 /* Common */
2210 wqe.num_sge = wr->num_sge;
2211 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2212 dev_err(rdev_to_dev(rdev),
2213 "Limit exceeded for Receive SGEs");
2214 rc = -EINVAL;
2215 break;
2216 }
2217 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2218 wr->num_sge);
2219 wqe.wr_id = wr->wr_id;
2220 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2221
2222 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2223 if (rc)
2224 break;
2225
2226 wr = wr->next;
2227 }
2228 if (!rc)
2229 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2230 return rc;
2231}
2232
2233int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2234 struct ib_recv_wr **bad_wr)
2235{
2236 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2237 struct bnxt_qplib_swqe wqe;
2238 int rc = 0, payload_sz = 0;
Devesh Sharma018cf592017-05-22 03:15:40 -07002239 unsigned long flags;
2240 u32 count = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002241
Devesh Sharma018cf592017-05-22 03:15:40 -07002242 spin_lock_irqsave(&qp->rq_lock, flags);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002243 while (wr) {
2244		/* Housekeeping */
2245 memset(&wqe, 0, sizeof(wqe));
2246
2247 /* Common */
2248 wqe.num_sge = wr->num_sge;
2249 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2250 dev_err(rdev_to_dev(qp->rdev),
2251 "Limit exceeded for Receive SGEs");
2252 rc = -EINVAL;
2253 *bad_wr = wr;
2254 break;
2255 }
2256
2257 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2258 wr->num_sge);
2259 wqe.wr_id = wr->wr_id;
2260 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2261
2262 if (ib_qp->qp_type == IB_QPT_GSI)
2263 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2264 payload_sz);
2265 if (!rc)
2266 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2267 if (rc) {
2268 *bad_wr = wr;
2269 break;
2270 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002271
2272		/* Ring the DB once the number of posted RQEs reaches the threshold */
2273 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2274 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2275 count = 0;
2276 }
2277
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002278 wr = wr->next;
2279 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002280
2281 if (count)
2282 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2283
2284 spin_unlock_irqrestore(&qp->rq_lock, flags);
2285
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002286 return rc;
2287}
2288
2289/* Completion Queues */
2290int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2291{
2292 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2293 struct bnxt_re_dev *rdev = cq->rdev;
2294 int rc;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002295 struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002296
2297 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2298 if (rc) {
2299 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2300 return rc;
2301 }
Doug Ledford374cb862017-04-25 14:00:59 -04002302 if (!IS_ERR_OR_NULL(cq->umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002303 ib_umem_release(cq->umem);
2304
2305	kfree(cq->cql);
2306	kfree(cq);
2309 atomic_dec(&rdev->cq_count);
Selvin Xavier6a5df912017-08-02 01:46:18 -07002310 nq->budget--;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002311 return 0;
2312}
2313
2314struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2315 const struct ib_cq_init_attr *attr,
2316 struct ib_ucontext *context,
2317 struct ib_udata *udata)
2318{
2319 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2320 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2321 struct bnxt_re_cq *cq = NULL;
2322 int rc, entries;
2323 int cqe = attr->cqe;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002324 struct bnxt_qplib_nq *nq = NULL;
2325 unsigned int nq_alloc_cnt;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002326
2327 /* Validate CQ fields */
2328 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2329		dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2330 return ERR_PTR(-EINVAL);
2331 }
2332 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2333 if (!cq)
2334 return ERR_PTR(-ENOMEM);
2335
2336 cq->rdev = rdev;
2337 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2338
2339 entries = roundup_pow_of_two(cqe + 1);
2340 if (entries > dev_attr->max_cq_wqes + 1)
2341 entries = dev_attr->max_cq_wqes + 1;
2342
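	/* User CQs map the CQE ring from the buffer supplied in the udata
	 * request; kernel CQs use the privileged DPI and allocate a scratch
	 * CQE list (cql) for poll_cq.
	 */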
2343 if (context) {
2344 struct bnxt_re_cq_req req;
2345 struct bnxt_re_ucontext *uctx = container_of
2346 (context,
2347 struct bnxt_re_ucontext,
2348 ib_uctx);
2349 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2350 rc = -EFAULT;
2351 goto fail;
2352 }
2353
2354 cq->umem = ib_umem_get(context, req.cq_va,
2355 entries * sizeof(struct cq_base),
2356 IB_ACCESS_LOCAL_WRITE, 1);
2357 if (IS_ERR(cq->umem)) {
2358 rc = PTR_ERR(cq->umem);
2359 goto fail;
2360 }
2361 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2362 cq->qplib_cq.nmap = cq->umem->nmap;
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07002363 cq->qplib_cq.dpi = &uctx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002364 } else {
2365 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2366 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2367 GFP_KERNEL);
2368 if (!cq->cql) {
2369 rc = -ENOMEM;
2370 goto fail;
2371 }
2372
2373 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2374 cq->qplib_cq.sghead = NULL;
2375 cq->qplib_cq.nmap = 0;
2376 }
Selvin Xavier6a5df912017-08-02 01:46:18 -07002377 /*
2378	 * Allocate the NQ in a round-robin fashion; nq_alloc_cnt is used
2379	 * to derive the NQ index.
2380 */
2381 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2382 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002383 cq->qplib_cq.max_wqe = entries;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002384 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2385 cq->qplib_cq.nq = nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002386
2387 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2388 if (rc) {
2389 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2390 goto fail;
2391 }
2392
2393 cq->ib_cq.cqe = entries;
2394 cq->cq_period = cq->qplib_cq.period;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002395 nq->budget++;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002396
2397 atomic_inc(&rdev->cq_count);
2398
2399 if (context) {
2400 struct bnxt_re_cq_resp resp;
2401
2402 resp.cqid = cq->qplib_cq.id;
2403 resp.tail = cq->qplib_cq.hwq.cons;
2404 resp.phase = cq->qplib_cq.period;
2405 resp.rsvd = 0;
2406 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2407 if (rc) {
2408 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2409 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2410 goto c2fail;
2411 }
2412 }
2413
2414 return &cq->ib_cq;
2415
2416c2fail:
2417 if (context)
2418 ib_umem_release(cq->umem);
2419fail:
2420 kfree(cq->cql);
2421 kfree(cq);
2422 return ERR_PTR(rc);
2423}
2424
2425static u8 __req_to_ib_wc_status(u8 qstatus)
2426{
2427 switch (qstatus) {
2428 case CQ_REQ_STATUS_OK:
2429 return IB_WC_SUCCESS;
2430 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2431 return IB_WC_BAD_RESP_ERR;
2432 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2433 return IB_WC_LOC_LEN_ERR;
2434 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2435 return IB_WC_LOC_QP_OP_ERR;
2436 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2437 return IB_WC_LOC_PROT_ERR;
2438 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2439 return IB_WC_GENERAL_ERR;
2440 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2441 return IB_WC_REM_INV_REQ_ERR;
2442 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2443 return IB_WC_REM_ACCESS_ERR;
2444 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2445 return IB_WC_REM_OP_ERR;
2446 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2447 return IB_WC_RNR_RETRY_EXC_ERR;
2448 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2449 return IB_WC_RETRY_EXC_ERR;
2450 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2451 return IB_WC_WR_FLUSH_ERR;
2452 default:
2453 return IB_WC_GENERAL_ERR;
2454 }
2455 return 0;
2456}
2457
2458static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2459{
2460 switch (qstatus) {
2461 case CQ_RES_RAWETH_QP1_STATUS_OK:
2462 return IB_WC_SUCCESS;
2463 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2464 return IB_WC_LOC_ACCESS_ERR;
2465 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2466 return IB_WC_LOC_LEN_ERR;
2467 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2468 return IB_WC_LOC_PROT_ERR;
2469 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2470 return IB_WC_LOC_QP_OP_ERR;
2471 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2472 return IB_WC_GENERAL_ERR;
2473 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2474 return IB_WC_WR_FLUSH_ERR;
2475 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2476 return IB_WC_WR_FLUSH_ERR;
2477 default:
2478 return IB_WC_GENERAL_ERR;
2479 }
2480}
2481
2482static u8 __rc_to_ib_wc_status(u8 qstatus)
2483{
2484 switch (qstatus) {
2485 case CQ_RES_RC_STATUS_OK:
2486 return IB_WC_SUCCESS;
2487 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2488 return IB_WC_LOC_ACCESS_ERR;
2489 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2490 return IB_WC_LOC_LEN_ERR;
2491 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2492 return IB_WC_LOC_PROT_ERR;
2493 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2494 return IB_WC_LOC_QP_OP_ERR;
2495 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2496 return IB_WC_GENERAL_ERR;
2497 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2498 return IB_WC_REM_INV_REQ_ERR;
2499 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2500 return IB_WC_WR_FLUSH_ERR;
2501 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2502 return IB_WC_WR_FLUSH_ERR;
2503 default:
2504 return IB_WC_GENERAL_ERR;
2505 }
2506}
2507
2508static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2509{
2510 switch (cqe->type) {
2511 case BNXT_QPLIB_SWQE_TYPE_SEND:
2512 wc->opcode = IB_WC_SEND;
2513 break;
2514 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2515 wc->opcode = IB_WC_SEND;
2516 wc->wc_flags |= IB_WC_WITH_IMM;
2517 break;
2518 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2519 wc->opcode = IB_WC_SEND;
2520 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2521 break;
2522 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2523 wc->opcode = IB_WC_RDMA_WRITE;
2524 break;
2525 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2526 wc->opcode = IB_WC_RDMA_WRITE;
2527 wc->wc_flags |= IB_WC_WITH_IMM;
2528 break;
2529 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2530 wc->opcode = IB_WC_RDMA_READ;
2531 break;
2532 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2533 wc->opcode = IB_WC_COMP_SWAP;
2534 break;
2535 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2536 wc->opcode = IB_WC_FETCH_ADD;
2537 break;
2538 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2539 wc->opcode = IB_WC_LOCAL_INV;
2540 break;
2541 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2542 wc->opcode = IB_WC_REG_MR;
2543 break;
2544 default:
2545 wc->opcode = IB_WC_SEND;
2546 break;
2547 }
2548
2549 wc->status = __req_to_ib_wc_status(cqe->status);
2550}
2551
2552static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2553 u16 raweth_qp1_flags2)
2554{
2555 bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
2556
2557 /* raweth_qp1_flags Bit 9-6 indicates itype */
2558 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2559 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2560 return -1;
2561
2562 if (raweth_qp1_flags2 &
2563 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2564 raweth_qp1_flags2 &
2565 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2566 is_udp = true;
2567 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
2568 (raweth_qp1_flags2 &
2569 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2570 (is_ipv6 = true) : (is_ipv4 = true);
2571 return ((is_ipv6) ?
2572 BNXT_RE_ROCEV2_IPV6_PACKET :
2573 BNXT_RE_ROCEV2_IPV4_PACKET);
2574 } else {
2575 return BNXT_RE_ROCE_V1_PACKET;
2576 }
2577}
2578
2579static int bnxt_re_to_ib_nw_type(int nw_type)
2580{
2581 u8 nw_hdr_type = 0xFF;
2582
2583 switch (nw_type) {
2584 case BNXT_RE_ROCE_V1_PACKET:
2585 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2586 break;
2587 case BNXT_RE_ROCEV2_IPV4_PACKET:
2588 nw_hdr_type = RDMA_NETWORK_IPV4;
2589 break;
2590 case BNXT_RE_ROCEV2_IPV6_PACKET:
2591 nw_hdr_type = RDMA_NETWORK_IPV6;
2592 break;
2593 }
2594 return nw_hdr_type;
2595}
2596
2597static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2598 void *rq_hdr_buf)
2599{
2600 u8 *tmp_buf = NULL;
2601 struct ethhdr *eth_hdr;
2602 u16 eth_type;
2603 bool rc = false;
2604
2605 tmp_buf = (u8 *)rq_hdr_buf;
2606 /*
2607	 * If the destination MAC is not the same as the interface MAC, this
2608	 * could be a loopback or a multicast address; check whether it is a
2609	 * loopback packet.
2610 */
2611 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2612 tmp_buf += 4;
2613 /* Check the ether type */
2614 eth_hdr = (struct ethhdr *)tmp_buf;
2615 eth_type = ntohs(eth_hdr->h_proto);
2616 switch (eth_type) {
2617 case ETH_P_IBOE:
2618 rc = true;
2619 break;
2620 case ETH_P_IP:
2621 case ETH_P_IPV6: {
2622 u32 len;
2623 struct udphdr *udp_hdr;
2624
2625 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2626 sizeof(struct ipv6hdr));
2627 tmp_buf += sizeof(struct ethhdr) + len;
2628 udp_hdr = (struct udphdr *)tmp_buf;
2629 if (ntohs(udp_hdr->dest) ==
2630 ROCE_V2_UDP_DPORT)
2631 rc = true;
2632 break;
2633 }
2634 default:
2635 break;
2636 }
2637 }
2638
2639 return rc;
2640}
2641
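/* Loop a packet received on the raw QP1 back through the shadow GSI QP:
 * post a receive on the shadow QP and send it the payload (minus the
 * Ethernet header), so the ULP sees a normal GSI completion with GRH +
 * MAD landing in its own buffer.
 */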
2642static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2643 struct bnxt_qplib_cqe *cqe)
2644{
2645 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2646 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2647 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2648 struct ib_send_wr *swr;
2649 struct ib_ud_wr udwr;
2650 struct ib_recv_wr rwr;
2651 int pkt_type = 0;
2652 u32 tbl_idx;
2653 void *rq_hdr_buf;
2654 dma_addr_t rq_hdr_buf_map;
2655 dma_addr_t shrq_hdr_buf_map;
2656 u32 offset = 0;
2657 u32 skip_bytes = 0;
2658 struct ib_sge s_sge[2];
2659 struct ib_sge r_sge[2];
2660 int rc;
2661
2662 memset(&udwr, 0, sizeof(udwr));
2663 memset(&rwr, 0, sizeof(rwr));
2664 memset(&s_sge, 0, sizeof(s_sge));
2665 memset(&r_sge, 0, sizeof(r_sge));
2666
2667 swr = &udwr.wr;
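	/* The raw QP1 receive wr_id was replaced with the shadow-QP table
	 * index when the buffer was posted; use it to find the saved entry.
	 */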
2668 tbl_idx = cqe->wr_id;
2669
2670 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2671 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2672 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2673 tbl_idx);
2674
2675 /* Shadow QP header buffer */
2676 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2677 tbl_idx);
2678 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2679
2680 /* Store this cqe */
2681 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2682 sqp_entry->qp1_qp = qp1_qp;
2683
2684 /* Find packet type from the cqe */
2685
2686 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2687 cqe->raweth_qp1_flags2);
2688 if (pkt_type < 0) {
2689 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2690 return -EINVAL;
2691 }
2692
2693 /* Adjust the offset for the user buffer and post in the rq */
2694
2695 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2696 offset = 20;
2697
2698 /*
2699 * QP1 loopback packet has 4 bytes of internal header before
2700 * ether header. Skip these four bytes.
2701 */
2702 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2703 skip_bytes = 4;
2704
2705	/* First send SGE. Skip the ether header */
2706 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2707 + skip_bytes;
2708 s_sge[0].lkey = 0xFFFFFFFF;
2709 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2710 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2711
2712 /* Second Send SGE */
2713 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2714 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2715 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2716 s_sge[1].addr += 8;
2717 s_sge[1].lkey = 0xFFFFFFFF;
2718 s_sge[1].length = 256;
2719
2720 /* First recv SGE */
2721
2722 r_sge[0].addr = shrq_hdr_buf_map;
2723 r_sge[0].lkey = 0xFFFFFFFF;
2724 r_sge[0].length = 40;
2725
2726 r_sge[1].addr = sqp_entry->sge.addr + offset;
2727 r_sge[1].lkey = sqp_entry->sge.lkey;
2728 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2729
2730 /* Create receive work request */
2731 rwr.num_sge = 2;
2732 rwr.sg_list = r_sge;
2733 rwr.wr_id = tbl_idx;
2734 rwr.next = NULL;
2735
2736 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2737 if (rc) {
2738 dev_err(rdev_to_dev(rdev),
2739 "Failed to post Rx buffers to shadow QP");
2740 return -ENOMEM;
2741 }
2742
2743 swr->num_sge = 2;
2744 swr->sg_list = s_sge;
2745 swr->wr_id = tbl_idx;
2746 swr->opcode = IB_WR_SEND;
2747 swr->next = NULL;
2748
2749 udwr.ah = &rdev->sqp_ah->ib_ah;
2750 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2751 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2752
2753 /* post data received in the send queue */
2754 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2755
2756 return 0;
2757}
2758
2759static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2760 struct bnxt_qplib_cqe *cqe)
2761{
2762 wc->opcode = IB_WC_RECV;
2763 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2764 wc->wc_flags |= IB_WC_GRH;
2765}
2766
2767static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2768 struct bnxt_qplib_cqe *cqe)
2769{
2770 wc->opcode = IB_WC_RECV;
2771 wc->status = __rc_to_ib_wc_status(cqe->status);
2772
2773 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2774 wc->wc_flags |= IB_WC_WITH_IMM;
2775 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2776 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2777 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2778 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2779 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2780}
2781
2782static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2783 struct ib_wc *wc,
2784 struct bnxt_qplib_cqe *cqe)
2785{
2786 u32 tbl_idx;
2787 struct bnxt_re_dev *rdev = qp->rdev;
2788 struct bnxt_re_qp *qp1_qp = NULL;
2789 struct bnxt_qplib_cqe *orig_cqe = NULL;
2790 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2791 int nw_type;
2792
2793 tbl_idx = cqe->wr_id;
2794
2795 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2796 qp1_qp = sqp_entry->qp1_qp;
2797 orig_cqe = &sqp_entry->cqe;
2798
2799 wc->wr_id = sqp_entry->wrid;
2800 wc->byte_len = orig_cqe->length;
2801 wc->qp = &qp1_qp->ib_qp;
2802
2803 wc->ex.imm_data = orig_cqe->immdata;
2804 wc->src_qp = orig_cqe->src_qp;
2805 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2806 wc->port_num = 1;
2807 wc->vendor_err = orig_cqe->status;
2808
2809 wc->opcode = IB_WC_RECV;
2810 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2811 wc->wc_flags |= IB_WC_GRH;
2812
2813 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2814 orig_cqe->raweth_qp1_flags2);
2815 if (nw_type >= 0) {
2816 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2817 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2818 }
2819}
2820
2821static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2822 struct bnxt_qplib_cqe *cqe)
2823{
2824 wc->opcode = IB_WC_RECV;
2825 wc->status = __rc_to_ib_wc_status(cqe->status);
2826
2827 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2828 wc->wc_flags |= IB_WC_WITH_IMM;
2829 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2830 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2831 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2832 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2833 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2834}
2835
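/* Post a fence memory-window bind as a phantom WQE on the send queue;
 * the poll path requests this when sq->send_phantom is set.
 */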
Eddie Wai9152e0b2017-06-14 03:26:23 -07002836static int send_phantom_wqe(struct bnxt_re_qp *qp)
2837{
2838 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2839 unsigned long flags;
2840 int rc = 0;
2841
2842 spin_lock_irqsave(&qp->sq_lock, flags);
2843
2844 rc = bnxt_re_bind_fence_mw(lib_qp);
2845 if (!rc) {
2846 lib_qp->sq.phantom_wqe_cnt++;
2847 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2848 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2849 lib_qp->id, lib_qp->sq.hwq.prod,
2850 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2851 lib_qp->sq.phantom_wqe_cnt);
2852 }
2853
2854 spin_unlock_irqrestore(&qp->sq_lock, flags);
2855 return rc;
2856}
2857
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002858int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2859{
2860 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2861 struct bnxt_re_qp *qp;
2862 struct bnxt_qplib_cqe *cqe;
2863 int i, ncqe, budget;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002864 struct bnxt_qplib_q *sq;
2865 struct bnxt_qplib_qp *lib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002866 u32 tbl_idx;
2867 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2868 unsigned long flags;
2869
2870 spin_lock_irqsave(&cq->cq_lock, flags);
2871 budget = min_t(u32, num_entries, cq->max_cql);
Devesh Sharma10d1ded2017-06-29 12:28:17 -07002872 num_entries = budget;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002873 if (!cq->cql) {
2874 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2875 goto exit;
2876 }
2877 cqe = &cq->cql[0];
2878 while (budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002879 lib_qp = NULL;
2880 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2881 if (lib_qp) {
2882 sq = &lib_qp->sq;
2883 if (sq->send_phantom) {
2884 qp = container_of(lib_qp,
2885 struct bnxt_re_qp, qplib_qp);
2886 if (send_phantom_wqe(qp) == -ENOMEM)
2887 dev_err(rdev_to_dev(cq->rdev),
2888 "Phantom failed! Scheduled to send again\n");
2889 else
2890 sq->send_phantom = false;
2891 }
2892 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002893 if (ncqe < budget)
2894 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
2895 cqe + ncqe,
2896 budget - ncqe);
Eddie Wai9152e0b2017-06-14 03:26:23 -07002897
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002898 if (!ncqe)
2899 break;
2900
2901 for (i = 0; i < ncqe; i++, cqe++) {
2902 /* Transcribe each qplib_wqe back to ib_wc */
2903 memset(wc, 0, sizeof(*wc));
2904
2905 wc->wr_id = cqe->wr_id;
2906 wc->byte_len = cqe->length;
2907 qp = container_of
2908 ((struct bnxt_qplib_qp *)
2909 (unsigned long)(cqe->qp_handle),
2910 struct bnxt_re_qp, qplib_qp);
2911 if (!qp) {
2912 dev_err(rdev_to_dev(cq->rdev),
2913 "POLL CQ : bad QP handle");
2914 continue;
2915 }
2916 wc->qp = &qp->ib_qp;
2917 wc->ex.imm_data = cqe->immdata;
2918 wc->src_qp = cqe->src_qp;
2919 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2920 wc->port_num = 1;
2921 wc->vendor_err = cqe->status;
2922
2923 switch (cqe->opcode) {
2924 case CQ_BASE_CQE_TYPE_REQ:
2925 if (qp->qplib_qp.id ==
2926 qp->rdev->qp1_sqp->qplib_qp.id) {
2927 /* Handle this completion with
2928 * the stored completion
2929 */
2930 memset(wc, 0, sizeof(*wc));
2931 continue;
2932 }
2933 bnxt_re_process_req_wc(wc, cqe);
2934 break;
2935 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2936 if (!cqe->status) {
2937 int rc = 0;
2938
2939 rc = bnxt_re_process_raw_qp_pkt_rx
2940 (qp, cqe);
2941 if (!rc) {
2942 memset(wc, 0, sizeof(*wc));
2943 continue;
2944 }
2945 cqe->status = -1;
2946 }
2947 /* Errors need not be looped back.
2948 * But change the wr_id to the one
2949 * stored in the table
2950 */
2951 tbl_idx = cqe->wr_id;
2952 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2953 wc->wr_id = sqp_entry->wrid;
2954 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2955 break;
2956 case CQ_BASE_CQE_TYPE_RES_RC:
2957 bnxt_re_process_res_rc_wc(wc, cqe);
2958 break;
2959 case CQ_BASE_CQE_TYPE_RES_UD:
2960 if (qp->qplib_qp.id ==
2961 qp->rdev->qp1_sqp->qplib_qp.id) {
2962 /* Handle this completion with
2963 * the stored completion
2964 */
2965 if (cqe->status) {
2966 continue;
2967 } else {
2968 bnxt_re_process_res_shadow_qp_wc
2969 (qp, wc, cqe);
2970 break;
2971 }
2972 }
2973 bnxt_re_process_res_ud_wc(wc, cqe);
2974 break;
2975 default:
2976 dev_err(rdev_to_dev(cq->rdev),
2977 "POLL CQ : type 0x%x not handled",
2978 cqe->opcode);
2979 continue;
2980 }
2981 wc++;
2982 budget--;
2983 }
2984 }
2985exit:
2986 spin_unlock_irqrestore(&cq->cq_lock, flags);
2987 return num_entries - budget;
2988}
2989
2990int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
2991 enum ib_cq_notify_flags ib_cqn_flags)
2992{
2993 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2994 int type = 0;
2995
2996 /* Trigger on the very next completion */
2997 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
2998 type = DBR_DBR_TYPE_CQ_ARMALL;
2999 /* Trigger on the next solicited completion */
3000 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3001 type = DBR_DBR_TYPE_CQ_ARMSE;
3002
Selvin Xavier499e4562017-06-29 12:28:18 -07003003 /* Poll to see if there are missed events */
3004 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3005 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
3006 return 1;
3007
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003008 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3009
3010 return 0;
3011}
3012
3013/* Memory Regions */
3014struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3015{
3016 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3017 struct bnxt_re_dev *rdev = pd->rdev;
3018 struct bnxt_re_mr *mr;
3019 u64 pbl = 0;
3020 int rc;
3021
3022 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3023 if (!mr)
3024 return ERR_PTR(-ENOMEM);
3025
3026 mr->rdev = rdev;
3027 mr->qplib_mr.pd = &pd->qplib_pd;
3028 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3029 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3030
3031 /* Allocate and register 0 as the address */
3032 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3033 if (rc)
3034 goto fail;
3035
3036 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3037	mr->qplib_mr.total_size = -1; /* Infinite length */
3038 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
3039 if (rc)
3040 goto fail_mr;
3041
3042 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3043 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3044 IB_ACCESS_REMOTE_ATOMIC))
3045 mr->ib_mr.rkey = mr->ib_mr.lkey;
3046 atomic_inc(&rdev->mr_count);
3047
3048 return &mr->ib_mr;
3049
3050fail_mr:
3051 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3052fail:
3053 kfree(mr);
3054 return ERR_PTR(rc);
3055}
3056
3057int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3058{
3059 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3060 struct bnxt_re_dev *rdev = mr->rdev;
Colin Ian Kingebbd1df2017-02-17 15:35:22 +00003061 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003062
Selvin Xavier1c980b02017-05-22 03:15:34 -07003063 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3064 if (rc) {
3065 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3066 return rc;
3067 }
3068
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003069 if (mr->npages && mr->pages) {
3070 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3071 &mr->qplib_frpl);
3072 kfree(mr->pages);
3073 mr->npages = 0;
3074 mr->pages = NULL;
3075 }
Doug Ledford374cb862017-04-25 14:00:59 -04003076 if (!IS_ERR_OR_NULL(mr->ib_umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003077 ib_umem_release(mr->ib_umem);
3078
3079 kfree(mr);
3080 atomic_dec(&rdev->mr_count);
3081 return rc;
3082}
3083
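/* ib_sg_to_pages() callback: record each page's DMA address into the MR's
 * page list, bounded by the fast-reg page list allocated in alloc_mr.
 */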
3084static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3085{
3086 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3087
3088 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3089 return -ENOMEM;
3090
3091 mr->pages[mr->npages++] = addr;
3092 return 0;
3093}
3094
3095int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3096 unsigned int *sg_offset)
3097{
3098 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3099
3100 mr->npages = 0;
3101 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3102}
3103
3104struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3105 u32 max_num_sg)
3106{
3107 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3108 struct bnxt_re_dev *rdev = pd->rdev;
3109 struct bnxt_re_mr *mr = NULL;
3110 int rc;
3111
3112 if (type != IB_MR_TYPE_MEM_REG) {
3113 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3114 return ERR_PTR(-EINVAL);
3115 }
3116 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3117 return ERR_PTR(-EINVAL);
3118
3119 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3120 if (!mr)
3121 return ERR_PTR(-ENOMEM);
3122
3123 mr->rdev = rdev;
3124 mr->qplib_mr.pd = &pd->qplib_pd;
3125 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3126 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3127
3128 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3129 if (rc)
3130 goto fail;
3131
3132 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3133 mr->ib_mr.rkey = mr->ib_mr.lkey;
3134
3135 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3136 if (!mr->pages) {
3137 rc = -ENOMEM;
3138 goto fail;
3139 }
3140 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3141 &mr->qplib_frpl, max_num_sg);
3142 if (rc) {
3143 dev_err(rdev_to_dev(rdev),
3144 "Failed to allocate HW FR page list");
3145 goto fail_mr;
3146 }
3147
3148 atomic_inc(&rdev->mr_count);
3149 return &mr->ib_mr;
3150
3151fail_mr:
3152 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3153fail:
3154 kfree(mr->pages);
3155 kfree(mr);
3156 return ERR_PTR(rc);
3157}
3158
Eddie Wai9152e0b2017-06-14 03:26:23 -07003159struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3160 struct ib_udata *udata)
3161{
3162 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3163 struct bnxt_re_dev *rdev = pd->rdev;
3164 struct bnxt_re_mw *mw;
3165 int rc;
3166
3167 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3168 if (!mw)
3169 return ERR_PTR(-ENOMEM);
3170 mw->rdev = rdev;
3171 mw->qplib_mw.pd = &pd->qplib_pd;
3172
3173 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3174 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3175 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3176 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3177 if (rc) {
3178 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3179 goto fail;
3180 }
3181 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3182
3183 atomic_inc(&rdev->mw_count);
3184 return &mw->ib_mw;
3185
3186fail:
3187 kfree(mw);
3188 return ERR_PTR(rc);
3189}
3190
3191int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3192{
3193 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3194 struct bnxt_re_dev *rdev = mw->rdev;
3195 int rc;
3196
3197 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3198 if (rc) {
3199 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3200 return rc;
3201 }
3202
3203 kfree(mw);
3204 atomic_dec(&rdev->mw_count);
3205 return rc;
3206}
3207
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003208/* uverbs */
3209struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3210 u64 virt_addr, int mr_access_flags,
3211 struct ib_udata *udata)
3212{
3213 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3214 struct bnxt_re_dev *rdev = pd->rdev;
3215 struct bnxt_re_mr *mr;
3216 struct ib_umem *umem;
3217 u64 *pbl_tbl, *pbl_tbl_orig;
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003218 int i, umem_pgs, pages, rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003219 struct scatterlist *sg;
3220 int entry;
3221
Selvin Xavier58d4a672017-06-29 12:28:12 -07003222 if (length > BNXT_RE_MAX_MR_SIZE) {
3223 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
3224 length, BNXT_RE_MAX_MR_SIZE);
3225 return ERR_PTR(-ENOMEM);
3226 }
3227
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003228 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3229 if (!mr)
3230 return ERR_PTR(-ENOMEM);
3231
3232 mr->rdev = rdev;
3233 mr->qplib_mr.pd = &pd->qplib_pd;
3234 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3235 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3236
3237 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3238 mr_access_flags, 0);
3239 if (IS_ERR(umem)) {
3240 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3241 rc = -EFAULT;
3242 goto free_mr;
3243 }
3244 mr->ib_umem = umem;
3245
3246 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3247 if (rc) {
3248 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3249 goto release_umem;
3250 }
3251 /* The fixed portion of the rkey is the same as the lkey */
3252 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3253
3254 mr->qplib_mr.va = virt_addr;
3255 umem_pgs = ib_umem_page_count(umem);
3256 if (!umem_pgs) {
3257 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3258 rc = -EINVAL;
3259 goto free_mrw;
3260 }
3261 mr->qplib_mr.total_size = length;
3262
3263	pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3264 if (!pbl_tbl) {
3265 rc = -EINVAL;
3266 goto free_mrw;
3267 }
3268 pbl_tbl_orig = pbl_tbl;
3269
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003270 if (umem->hugetlb) {
3271 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3272 rc = -EFAULT;
3273 goto fail;
3274 }
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003275
3276 if (umem->page_shift != PAGE_SHIFT) {
3277 dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003278 rc = -EFAULT;
3279 goto fail;
3280 }
3281 /* Map umem buf ptrs to the PBL */
3282 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003283 pages = sg_dma_len(sg) >> umem->page_shift;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003284 for (i = 0; i < pages; i++, pbl_tbl++)
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003285 *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003286 }
3287 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3288 umem_pgs, false);
3289 if (rc) {
3290 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3291 goto fail;
3292 }
3293
3294 kfree(pbl_tbl_orig);
3295
3296 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3297 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3298 atomic_inc(&rdev->mr_count);
3299
3300 return &mr->ib_mr;
3301fail:
3302 kfree(pbl_tbl_orig);
3303free_mrw:
3304 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3305release_umem:
3306 ib_umem_release(umem);
3307free_mr:
3308 kfree(mr);
3309 return ERR_PTR(rc);
3310}
3311
3312struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3313 struct ib_udata *udata)
3314{
3315 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3316 struct bnxt_re_uctx_resp resp;
3317 struct bnxt_re_ucontext *uctx;
3318 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3319 int rc;
3320
3321 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3322 ibdev->uverbs_abi_ver);
3323
3324 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3325		dev_dbg(rdev_to_dev(rdev), "requested ABI version is different from the device ABI version %d",
3326 BNXT_RE_ABI_VERSION);
3327 return ERR_PTR(-EPERM);
3328 }
3329
3330 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3331 if (!uctx)
3332 return ERR_PTR(-ENOMEM);
3333
3334 uctx->rdev = rdev;
3335
3336 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3337 if (!uctx->shpg) {
3338 rc = -ENOMEM;
3339 goto fail;
3340 }
3341 spin_lock_init(&uctx->sh_lock);
3342
3343	resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3344 resp.max_qp = rdev->qplib_ctx.qpc_count;
3345 resp.pg_size = PAGE_SIZE;
3346 resp.cqe_sz = sizeof(struct cq_base);
3347 resp.max_cqd = dev_attr->max_cq_wqes;
3348 resp.rsvd = 0;
3349
3350 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3351 if (rc) {
3352 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3353 rc = -EFAULT;
3354 goto cfail;
3355 }
3356
3357 return &uctx->ib_uctx;
3358cfail:
3359 free_page((unsigned long)uctx->shpg);
3360 uctx->shpg = NULL;
3361fail:
3362 kfree(uctx);
3363 return ERR_PTR(rc);
3364}
3365
3366int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3367{
3368 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3369 struct bnxt_re_ucontext,
3370 ib_uctx);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003371
3372 struct bnxt_re_dev *rdev = uctx->rdev;
3373 int rc = 0;
3374
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003375 if (uctx->shpg)
3376 free_page((unsigned long)uctx->shpg);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003377
3378 if (uctx->dpi.dbr) {
3379 /* Free DPI only if this is the first PD allocated by the
3380 * application and mark the context dpi as NULL
3381 */
3382 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3383 &rdev->qplib_res.dpi_tbl,
3384 &uctx->dpi);
3385 if (rc)
Colin Ian King24bb4d82017-07-14 08:30:10 +01003386 dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003387 /* Don't fail, continue*/
3388 uctx->dpi.dbr = NULL;
3389 }
3390
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003391 kfree(uctx);
3392 return 0;
3393}
3394
3395/* Helper function to mmap the virtual memory from user app */
3396int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3397{
3398 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3399 struct bnxt_re_ucontext,
3400 ib_uctx);
3401 struct bnxt_re_dev *rdev = uctx->rdev;
3402 u64 pfn;
3403
3404 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3405 return -EINVAL;
3406
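	/* A non-zero offset maps the user DPI doorbell page (uncached device
	 * memory); offset zero maps the driver's shared page.
	 */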
3407 if (vma->vm_pgoff) {
3408 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3409 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3410 PAGE_SIZE, vma->vm_page_prot)) {
3411 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3412 return -EAGAIN;
3413 }
3414 } else {
3415 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3416 if (remap_pfn_range(vma, vma->vm_start,
3417 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3418 dev_err(rdev_to_dev(rdev),
3419 "Failed to map shared page");
3420 return -EAGAIN;
3421 }
3422 }
3423
3424 return 0;
3425}