blob: 0dbdbe1616abb807c478778181278d1154e331e3 [file] [log] [blame]
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39#include <linux/interrupt.h>
40#include <linux/types.h>
41#include <linux/pci.h>
42#include <linux/netdevice.h>
43#include <linux/if_ether.h>
44
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_user_verbs.h>
47#include <rdma/ib_umem.h>
48#include <rdma/ib_addr.h>
49#include <rdma/ib_mad.h>
50#include <rdma/ib_cache.h>
51
52#include "bnxt_ulp.h"
53
54#include "roce_hsi.h"
55#include "qplib_res.h"
56#include "qplib_sp.h"
57#include "qplib_fp.h"
58#include "qplib_rcfw.h"
59
60#include "bnxt_re.h"
61#include "ib_verbs.h"
62#include <rdma/bnxt_re-abi.h>
63
Eddie Wai9152e0b2017-06-14 03:26:23 -070064static int __from_ib_access_flags(int iflags)
65{
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
83};
84
85static enum ib_access_flags __to_ib_access_flags(int qflags)
86{
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
104};
105
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800106static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 struct bnxt_qplib_sge *sg_list, int num)
108{
109 int i, total = 0;
110
111 for (i = 0; i < num; i++) {
112 sg_list[i].addr = ib_sg_list[i].addr;
113 sg_list[i].lkey = ib_sg_list[i].lkey;
114 sg_list[i].size = ib_sg_list[i].length;
115 total += sg_list[i].size;
116 }
117 return total;
118}
119
120/* Device */
121struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122{
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
125
126 rcu_read_lock();
127 if (rdev)
128 netdev = rdev->netdev;
129 if (netdev)
130 dev_hold(netdev);
131
132 rcu_read_unlock();
133 return netdev;
134}
135
136int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
139{
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142
143 memset(ib_attr, 0, sizeof(*ib_attr));
144
145 ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
146 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
147 (u8 *)&ib_attr->sys_image_guid);
Selvin Xavier58d4a672017-06-29 12:28:12 -0700148 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
149 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800150
151 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
152 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
153 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
154 ib_attr->max_qp = dev_attr->max_qp;
155 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
156 ib_attr->device_cap_flags =
157 IB_DEVICE_CURR_QP_STATE_MOD
158 | IB_DEVICE_RC_RNR_NAK_GEN
159 | IB_DEVICE_SHUTDOWN_PORT
160 | IB_DEVICE_SYS_IMAGE_GUID
161 | IB_DEVICE_LOCAL_DMA_LKEY
162 | IB_DEVICE_RESIZE_MAX_WR
163 | IB_DEVICE_PORT_ACTIVE_EVENT
164 | IB_DEVICE_N_NOTIFY_CQ
165 | IB_DEVICE_MEM_WINDOW
166 | IB_DEVICE_MEM_WINDOW_TYPE_2B
167 | IB_DEVICE_MEM_MGT_EXTENSIONS;
168 ib_attr->max_sge = dev_attr->max_qp_sges;
169 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
170 ib_attr->max_cq = dev_attr->max_cq;
171 ib_attr->max_cqe = dev_attr->max_cq_wqes;
172 ib_attr->max_mr = dev_attr->max_mr;
173 ib_attr->max_pd = dev_attr->max_pd;
174 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
Eddie Waia25d1122017-06-29 12:28:13 -0700175 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
Devesh Sharma254cd252017-06-29 12:28:16 -0700176 if (dev_attr->is_atomic) {
177 ib_attr->atomic_cap = IB_ATOMIC_HCA;
178 ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
179 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800180
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
184 ib_attr->max_ee = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
193
Selvin Xavier86816a02017-05-22 03:15:44 -0700194 ib_attr->max_fmr = 0;
195 ib_attr->max_map_per_fmr = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800196
197 ib_attr->max_srq = dev_attr->max_srq;
198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
200
201 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
202
203 ib_attr->max_pkeys = 1;
Selvin Xavier601577b2017-06-29 12:28:19 -0700204 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800205 return 0;
206}
207
208int bnxt_re_modify_device(struct ib_device *ibdev,
209 int device_modify_mask,
210 struct ib_device_modify *device_modify)
211{
212 switch (device_modify_mask) {
213 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
214 /* Modify the GUID requires the modification of the GID table */
215 /* GUID should be made as READ-ONLY */
216 break;
217 case IB_DEVICE_MODIFY_NODE_DESC:
218 /* Node Desc should be made as READ-ONLY */
219 break;
220 default:
221 break;
222 }
223 return 0;
224}
225
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800226/* Port */
227int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
228 struct ib_port_attr *port_attr)
229{
230 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
231 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
232
233 memset(port_attr, 0, sizeof(*port_attr));
234
235 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
236 port_attr->state = IB_PORT_ACTIVE;
237 port_attr->phys_state = 5;
238 } else {
239 port_attr->state = IB_PORT_DOWN;
240 port_attr->phys_state = 3;
241 }
242 port_attr->max_mtu = IB_MTU_4096;
243 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
244 port_attr->gid_tbl_len = dev_attr->max_sgid;
245 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
246 IB_PORT_DEVICE_MGMT_SUP |
247 IB_PORT_VENDOR_CLASS_SUP |
248 IB_PORT_IP_BASED_GIDS;
249
250 /* Max MSG size set to 2G for now */
251 port_attr->max_msg_sz = 0x80000000;
252 port_attr->bad_pkey_cntr = 0;
253 port_attr->qkey_viol_cntr = 0;
254 port_attr->pkey_tbl_len = dev_attr->max_pkey;
255 port_attr->lid = 0;
256 port_attr->sm_lid = 0;
257 port_attr->lmc = 0;
258 port_attr->max_vl_num = 4;
259 port_attr->sm_sl = 0;
260 port_attr->subnet_timeout = 0;
261 port_attr->init_type_reply = 0;
262 /* call the underlying netdev's ethtool hooks to query speed settings
263 * for which we acquire rtnl_lock _only_ if it's registered with
264 * IB stack to avoid race in the NETDEV_UNREG path
265 */
266 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
Selvin Xavier7be05752017-08-17 07:58:07 -0700267 if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
268 &port_attr->active_width))
Yuval Shaiad4186192017-06-14 23:13:34 +0300269 return -EINVAL;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800270 return 0;
271}
272
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800273int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
274 struct ib_port_immutable *immutable)
275{
276 struct ib_port_attr port_attr;
277
278 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
279 return -EINVAL;
280
281 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
282 immutable->gid_tbl_len = port_attr.gid_tbl_len;
283 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
284 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
285 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
286 return 0;
287}
288
289int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
290 u16 index, u16 *pkey)
291{
292 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
293
294 /* Ignore port_num */
295
296 memset(pkey, 0, sizeof(*pkey));
297 return bnxt_qplib_get_pkey(&rdev->qplib_res,
298 &rdev->qplib_res.pkey_tbl, index, pkey);
299}
300
301int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
302 int index, union ib_gid *gid)
303{
304 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
305 int rc = 0;
306
307 /* Ignore port_num */
308 memset(gid, 0, sizeof(*gid));
309 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
310 &rdev->qplib_res.sgid_tbl, index,
311 (struct bnxt_qplib_gid *)gid);
312 return rc;
313}
314
315int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
316 unsigned int index, void **context)
317{
318 int rc = 0;
319 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
320 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
321 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
322
323 /* Delete the entry from the hardware */
324 ctx = *context;
325 if (!ctx)
326 return -EINVAL;
327
328 if (sgid_tbl && sgid_tbl->active) {
329 if (ctx->idx >= sgid_tbl->max)
330 return -EINVAL;
331 ctx->refcnt--;
332 if (!ctx->refcnt) {
Selvin Xavier4a62c5e2017-06-29 12:28:11 -0700333 rc = bnxt_qplib_del_sgid(sgid_tbl,
334 &sgid_tbl->tbl[ctx->idx],
335 true);
336 if (rc) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800337 dev_err(rdev_to_dev(rdev),
338 "Failed to remove GID: %#x", rc);
Selvin Xavier4a62c5e2017-06-29 12:28:11 -0700339 } else {
340 ctx_tbl = sgid_tbl->ctx;
341 ctx_tbl[ctx->idx] = NULL;
342 kfree(ctx);
343 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800344 }
345 } else {
346 return -EINVAL;
347 }
348 return rc;
349}
350
351int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
352 unsigned int index, const union ib_gid *gid,
353 const struct ib_gid_attr *attr, void **context)
354{
355 int rc;
356 u32 tbl_idx = 0;
357 u16 vlan_id = 0xFFFF;
358 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
359 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
360 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
361
362 if ((attr->ndev) && is_vlan_dev(attr->ndev))
363 vlan_id = vlan_dev_vlan_id(attr->ndev);
364
365 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
366 rdev->qplib_res.netdev->dev_addr,
367 vlan_id, true, &tbl_idx);
368 if (rc == -EALREADY) {
369 ctx_tbl = sgid_tbl->ctx;
370 ctx_tbl[tbl_idx]->refcnt++;
371 *context = ctx_tbl[tbl_idx];
372 return 0;
373 }
374
375 if (rc < 0) {
376 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
377 return rc;
378 }
379
380 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
381 if (!ctx)
382 return -ENOMEM;
383 ctx_tbl = sgid_tbl->ctx;
384 ctx->idx = tbl_idx;
385 ctx->refcnt = 1;
386 ctx_tbl[tbl_idx] = ctx;
387
388 return rc;
389}
390
391enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
392 u8 port_num)
393{
394 return IB_LINK_LAYER_ETHERNET;
395}
396
Eddie Wai9152e0b2017-06-14 03:26:23 -0700397#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
398
399static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
400{
401 struct bnxt_re_fence_data *fence = &pd->fence;
402 struct ib_mr *ib_mr = &fence->mr->ib_mr;
403 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
404
405 memset(wqe, 0, sizeof(*wqe));
406 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
407 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
408 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
409 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
410 wqe->bind.zero_based = false;
411 wqe->bind.parent_l_key = ib_mr->lkey;
412 wqe->bind.va = (u64)(unsigned long)fence->va;
413 wqe->bind.length = fence->size;
414 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
415 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
416
417 /* Save the initial rkey in fence structure for now;
418 * wqe->bind.r_key will be set at (re)bind time.
419 */
420 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
421}
422
423static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
424{
425 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
426 qplib_qp);
427 struct ib_pd *ib_pd = qp->ib_qp.pd;
428 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
429 struct bnxt_re_fence_data *fence = &pd->fence;
430 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
431 struct bnxt_qplib_swqe wqe;
432 int rc;
433
434 memcpy(&wqe, fence_wqe, sizeof(wqe));
435 wqe.bind.r_key = fence->bind_rkey;
436 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
437
438 dev_dbg(rdev_to_dev(qp->rdev),
439 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
440 wqe.bind.r_key, qp->qplib_qp.id, pd);
441 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
442 if (rc) {
443 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
444 return rc;
445 }
446 bnxt_qplib_post_send_db(&qp->qplib_qp);
447
448 return rc;
449}
450
451static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
452{
453 struct bnxt_re_fence_data *fence = &pd->fence;
454 struct bnxt_re_dev *rdev = pd->rdev;
455 struct device *dev = &rdev->en_dev->pdev->dev;
456 struct bnxt_re_mr *mr = fence->mr;
457
458 if (fence->mw) {
459 bnxt_re_dealloc_mw(fence->mw);
460 fence->mw = NULL;
461 }
462 if (mr) {
463 if (mr->ib_mr.rkey)
464 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
465 true);
466 if (mr->ib_mr.lkey)
467 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
468 kfree(mr);
469 fence->mr = NULL;
470 }
471 if (fence->dma_addr) {
472 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
473 DMA_BIDIRECTIONAL);
474 fence->dma_addr = 0;
475 }
476}
477
478static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
479{
480 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
481 struct bnxt_re_fence_data *fence = &pd->fence;
482 struct bnxt_re_dev *rdev = pd->rdev;
483 struct device *dev = &rdev->en_dev->pdev->dev;
484 struct bnxt_re_mr *mr = NULL;
485 dma_addr_t dma_addr = 0;
486 struct ib_mw *mw;
487 u64 pbl_tbl;
488 int rc;
489
490 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
491 DMA_BIDIRECTIONAL);
492 rc = dma_mapping_error(dev, dma_addr);
493 if (rc) {
494 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
495 rc = -EIO;
496 fence->dma_addr = 0;
497 goto fail;
498 }
499 fence->dma_addr = dma_addr;
500
501 /* Allocate a MR */
502 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
503 if (!mr) {
504 rc = -ENOMEM;
505 goto fail;
506 }
507 fence->mr = mr;
508 mr->rdev = rdev;
509 mr->qplib_mr.pd = &pd->qplib_pd;
510 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
511 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
512 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
513 if (rc) {
514 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
515 goto fail;
516 }
517
518 /* Register MR */
519 mr->ib_mr.lkey = mr->qplib_mr.lkey;
520 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
521 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
522 pbl_tbl = dma_addr;
523 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
524 BNXT_RE_FENCE_PBL_SIZE, false);
525 if (rc) {
526 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
527 goto fail;
528 }
529 mr->ib_mr.rkey = mr->qplib_mr.rkey;
530
531 /* Create a fence MW only for kernel consumers */
532 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
Dan Carpenter653f0a72017-07-10 10:22:47 +0300533 if (IS_ERR(mw)) {
Eddie Wai9152e0b2017-06-14 03:26:23 -0700534 dev_err(rdev_to_dev(rdev),
535 "Failed to create fence-MW for PD: %p\n", pd);
Dan Carpenter653f0a72017-07-10 10:22:47 +0300536 rc = PTR_ERR(mw);
Eddie Wai9152e0b2017-06-14 03:26:23 -0700537 goto fail;
538 }
539 fence->mw = mw;
540
541 bnxt_re_create_fence_wqe(pd);
542 return 0;
543
544fail:
545 bnxt_re_destroy_fence_mr(pd);
546 return rc;
547}
548
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800549/* Protection Domains */
550int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
551{
552 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
553 struct bnxt_re_dev *rdev = pd->rdev;
554 int rc;
555
Eddie Wai9152e0b2017-06-14 03:26:23 -0700556 bnxt_re_destroy_fence_mr(pd);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800557
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700558 if (pd->qplib_pd.id) {
559 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
560 &rdev->qplib_res.pd_tbl,
561 &pd->qplib_pd);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800562 if (rc)
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700563 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800564 }
565
566 kfree(pd);
567 return 0;
568}
569
570struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
571 struct ib_ucontext *ucontext,
572 struct ib_udata *udata)
573{
574 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
575 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
576 struct bnxt_re_ucontext,
577 ib_uctx);
578 struct bnxt_re_pd *pd;
579 int rc;
580
581 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
582 if (!pd)
583 return ERR_PTR(-ENOMEM);
584
585 pd->rdev = rdev;
586 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
587 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
588 rc = -ENOMEM;
589 goto fail;
590 }
591
592 if (udata) {
593 struct bnxt_re_pd_resp resp;
594
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700595 if (!ucntx->dpi.dbr) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800596 /* Allocate DPI in alloc_pd to avoid failing of
597 * ibv_devinfo and family of application when DPIs
598 * are depleted.
599 */
600 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700601 &ucntx->dpi, ucntx)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800602 rc = -ENOMEM;
603 goto dbfail;
604 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800605 }
606
607 resp.pdid = pd->qplib_pd.id;
608 /* Still allow mapping this DBR to the new user PD. */
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700609 resp.dpi = ucntx->dpi.dpi;
610 resp.dbr = (u64)ucntx->dpi.umdbr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800611
612 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
613 if (rc) {
614 dev_err(rdev_to_dev(rdev),
615 "Failed to copy user response\n");
616 goto dbfail;
617 }
618 }
619
Eddie Wai9152e0b2017-06-14 03:26:23 -0700620 if (!udata)
621 if (bnxt_re_create_fence_mr(pd))
622 dev_warn(rdev_to_dev(rdev),
623 "Failed to create Fence-MR\n");
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800624 return &pd->ib_pd;
625dbfail:
626 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
627 &pd->qplib_pd);
628fail:
629 kfree(pd);
630 return ERR_PTR(rc);
631}
632
633/* Address Handles */
634int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
635{
636 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
637 struct bnxt_re_dev *rdev = ah->rdev;
638 int rc;
639
640 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
641 if (rc) {
642 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
643 return rc;
644 }
645 kfree(ah);
646 return 0;
647}
648
649struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400650 struct rdma_ah_attr *ah_attr,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800651 struct ib_udata *udata)
652{
653 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
654 struct bnxt_re_dev *rdev = pd->rdev;
655 struct bnxt_re_ah *ah;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400656 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800657 int rc;
658 u16 vlan_tag;
659 u8 nw_type;
660
661 struct ib_gid_attr sgid_attr;
662
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400663 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800664 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
665 return ERR_PTR(-EINVAL);
666 }
667 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
668 if (!ah)
669 return ERR_PTR(-ENOMEM);
670
671 ah->rdev = rdev;
672 ah->qplib_ah.pd = &pd->qplib_pd;
673
674 /* Supply the configuration for the HW */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400675 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800676 sizeof(union ib_gid));
677 /*
678 * If RoCE V2 is enabled, stack will have two entries for
679 * each GID entry. Avoiding this duplicte entry in HW. Dividing
680 * the GID index by 2 for RoCE V2
681 */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400682 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
683 ah->qplib_ah.host_sgid_index = grh->sgid_index;
684 ah->qplib_ah.traffic_class = grh->traffic_class;
685 ah->qplib_ah.flow_label = grh->flow_label;
686 ah->qplib_ah.hop_limit = grh->hop_limit;
687 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800688 if (ib_pd->uobject &&
689 !rdma_is_multicast_addr((struct in6_addr *)
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400690 grh->dgid.raw) &&
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800691 !rdma_link_local_addr((struct in6_addr *)
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400692 grh->dgid.raw)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800693 union ib_gid sgid;
694
695 rc = ib_get_cached_gid(&rdev->ibdev, 1,
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400696 grh->sgid_index, &sgid,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800697 &sgid_attr);
698 if (rc) {
699 dev_err(rdev_to_dev(rdev),
700 "Failed to query gid at index %d",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400701 grh->sgid_index);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800702 goto fail;
703 }
704 if (sgid_attr.ndev) {
705 if (is_vlan_dev(sgid_attr.ndev))
706 vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
707 dev_put(sgid_attr.ndev);
708 }
709 /* Get network header type for this GID */
710 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
711 switch (nw_type) {
712 case RDMA_NETWORK_IPV4:
713 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
714 break;
715 case RDMA_NETWORK_IPV6:
716 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
717 break;
718 default:
719 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
720 break;
721 }
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400722 rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400723 ah_attr->roce.dmac, &vlan_tag,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800724 &sgid_attr.ndev->ifindex,
725 NULL);
726 if (rc) {
727 dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
728 goto fail;
729 }
730 }
731
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400732 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800733 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
734 if (rc) {
735 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
736 goto fail;
737 }
738
739 /* Write AVID to shared page. */
740 if (ib_pd->uobject) {
741 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
742 struct bnxt_re_ucontext *uctx;
743 unsigned long flag;
744 u32 *wrptr;
745
746 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
747 spin_lock_irqsave(&uctx->sh_lock, flag);
748 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
749 *wrptr = ah->qplib_ah.id;
750 wmb(); /* make sure cache is updated. */
751 spin_unlock_irqrestore(&uctx->sh_lock, flag);
752 }
753
754 return &ah->ib_ah;
755
756fail:
757 kfree(ah);
758 return ERR_PTR(rc);
759}
760
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400761int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800762{
763 return 0;
764}
765
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400766int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800767{
768 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
769
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400770 ah_attr->type = ib_ah->type;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400771 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400772 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400773 rdma_ah_set_grh(ah_attr, NULL, 0,
774 ah->qplib_ah.host_sgid_index,
775 0, ah->qplib_ah.traffic_class);
776 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
777 rdma_ah_set_port_num(ah_attr, 1);
778 rdma_ah_set_static_rate(ah_attr, 0);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800779 return 0;
780}
781
782/* Queue Pairs */
783int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
784{
785 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
786 struct bnxt_re_dev *rdev = qp->rdev;
787 int rc;
788
Selvin Xavierf218d672017-06-29 12:28:15 -0700789 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800790 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
791 if (rc) {
792 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
793 return rc;
794 }
795 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
796 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
797 &rdev->sqp_ah->qplib_ah);
798 if (rc) {
799 dev_err(rdev_to_dev(rdev),
800 "Failed to destroy HW AH for shadow QP");
801 return rc;
802 }
803
Selvin Xavierf218d672017-06-29 12:28:15 -0700804 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800805 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
806 &rdev->qp1_sqp->qplib_qp);
807 if (rc) {
808 dev_err(rdev_to_dev(rdev),
809 "Failed to destroy Shadow QP");
810 return rc;
811 }
812 mutex_lock(&rdev->qp_lock);
813 list_del(&rdev->qp1_sqp->list);
814 atomic_dec(&rdev->qp_count);
815 mutex_unlock(&rdev->qp_lock);
816
817 kfree(rdev->sqp_ah);
818 kfree(rdev->qp1_sqp);
819 }
820
Doug Ledford374cb862017-04-25 14:00:59 -0400821 if (!IS_ERR_OR_NULL(qp->rumem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800822 ib_umem_release(qp->rumem);
Doug Ledford374cb862017-04-25 14:00:59 -0400823 if (!IS_ERR_OR_NULL(qp->sumem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800824 ib_umem_release(qp->sumem);
825
826 mutex_lock(&rdev->qp_lock);
827 list_del(&qp->list);
828 atomic_dec(&rdev->qp_count);
829 mutex_unlock(&rdev->qp_lock);
830 kfree(qp);
831 return 0;
832}
833
834static u8 __from_ib_qp_type(enum ib_qp_type type)
835{
836 switch (type) {
837 case IB_QPT_GSI:
838 return CMDQ_CREATE_QP1_TYPE_GSI;
839 case IB_QPT_RC:
840 return CMDQ_CREATE_QP_TYPE_RC;
841 case IB_QPT_UD:
842 return CMDQ_CREATE_QP_TYPE_UD;
843 default:
844 return IB_QPT_MAX;
845 }
846}
847
848static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
849 struct bnxt_re_qp *qp, struct ib_udata *udata)
850{
851 struct bnxt_re_qp_req ureq;
852 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
853 struct ib_umem *umem;
854 int bytes = 0;
855 struct ib_ucontext *context = pd->ib_pd.uobject->context;
856 struct bnxt_re_ucontext *cntx = container_of(context,
857 struct bnxt_re_ucontext,
858 ib_uctx);
859 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
860 return -EFAULT;
861
862 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
863 /* Consider mapping PSN search memory only for RC QPs. */
864 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
865 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
866 bytes = PAGE_ALIGN(bytes);
867 umem = ib_umem_get(context, ureq.qpsva, bytes,
868 IB_ACCESS_LOCAL_WRITE, 1);
869 if (IS_ERR(umem))
870 return PTR_ERR(umem);
871
872 qp->sumem = umem;
873 qplib_qp->sq.sglist = umem->sg_head.sgl;
874 qplib_qp->sq.nmap = umem->nmap;
875 qplib_qp->qp_handle = ureq.qp_handle;
876
877 if (!qp->qplib_qp.srq) {
878 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
879 bytes = PAGE_ALIGN(bytes);
880 umem = ib_umem_get(context, ureq.qprva, bytes,
881 IB_ACCESS_LOCAL_WRITE, 1);
882 if (IS_ERR(umem))
883 goto rqfail;
884 qp->rumem = umem;
885 qplib_qp->rq.sglist = umem->sg_head.sgl;
886 qplib_qp->rq.nmap = umem->nmap;
887 }
888
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700889 qplib_qp->dpi = &cntx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800890 return 0;
891rqfail:
892 ib_umem_release(qp->sumem);
893 qp->sumem = NULL;
894 qplib_qp->sq.sglist = NULL;
895 qplib_qp->sq.nmap = 0;
896
897 return PTR_ERR(umem);
898}
899
900static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
901 (struct bnxt_re_pd *pd,
902 struct bnxt_qplib_res *qp1_res,
903 struct bnxt_qplib_qp *qp1_qp)
904{
905 struct bnxt_re_dev *rdev = pd->rdev;
906 struct bnxt_re_ah *ah;
907 union ib_gid sgid;
908 int rc;
909
910 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
911 if (!ah)
912 return NULL;
913
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800914 ah->rdev = rdev;
915 ah->qplib_ah.pd = &pd->qplib_pd;
916
917 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
918 if (rc)
919 goto fail;
920
921 /* supply the dgid data same as sgid */
922 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
923 sizeof(union ib_gid));
924 ah->qplib_ah.sgid_index = 0;
925
926 ah->qplib_ah.traffic_class = 0;
927 ah->qplib_ah.flow_label = 0;
928 ah->qplib_ah.hop_limit = 1;
929 ah->qplib_ah.sl = 0;
930 /* Have DMAC same as SMAC */
931 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
932
933 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
934 if (rc) {
935 dev_err(rdev_to_dev(rdev),
936 "Failed to allocate HW AH for Shadow QP");
937 goto fail;
938 }
939
940 return ah;
941
942fail:
943 kfree(ah);
944 return NULL;
945}
946
947static struct bnxt_re_qp *bnxt_re_create_shadow_qp
948 (struct bnxt_re_pd *pd,
949 struct bnxt_qplib_res *qp1_res,
950 struct bnxt_qplib_qp *qp1_qp)
951{
952 struct bnxt_re_dev *rdev = pd->rdev;
953 struct bnxt_re_qp *qp;
954 int rc;
955
956 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
957 if (!qp)
958 return NULL;
959
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800960 qp->rdev = rdev;
961
962 /* Initialize the shadow QP structure from the QP1 values */
963 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
964
965 qp->qplib_qp.pd = &pd->qplib_pd;
966 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
967 qp->qplib_qp.type = IB_QPT_UD;
968
969 qp->qplib_qp.max_inline_data = 0;
970 qp->qplib_qp.sig_type = true;
971
972 /* Shadow QP SQ depth should be same as QP1 RQ depth */
973 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
974 qp->qplib_qp.sq.max_sge = 2;
Eddie Wai9152e0b2017-06-14 03:26:23 -0700975 /* Q full delta can be 1 since it is internal QP */
976 qp->qplib_qp.sq.q_full_delta = 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800977
978 qp->qplib_qp.scq = qp1_qp->scq;
979 qp->qplib_qp.rcq = qp1_qp->rcq;
980
981 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
982 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
Eddie Wai9152e0b2017-06-14 03:26:23 -0700983 /* Q full delta can be 1 since it is internal QP */
984 qp->qplib_qp.rq.q_full_delta = 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800985
986 qp->qplib_qp.mtu = qp1_qp->mtu;
987
988 qp->qplib_qp.sq_hdr_buf_size = 0;
989 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
990 qp->qplib_qp.dpi = &rdev->dpi_privileged;
991
992 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
993 if (rc)
994 goto fail;
995
996 rdev->sqp_id = qp->qplib_qp.id;
997
998 spin_lock_init(&qp->sq_lock);
999 INIT_LIST_HEAD(&qp->list);
1000 mutex_lock(&rdev->qp_lock);
1001 list_add_tail(&qp->list, &rdev->qp_list);
1002 atomic_inc(&rdev->qp_count);
1003 mutex_unlock(&rdev->qp_lock);
1004 return qp;
1005fail:
1006 kfree(qp);
1007 return NULL;
1008}
1009
1010struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1011 struct ib_qp_init_attr *qp_init_attr,
1012 struct ib_udata *udata)
1013{
1014 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1015 struct bnxt_re_dev *rdev = pd->rdev;
1016 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1017 struct bnxt_re_qp *qp;
1018 struct bnxt_re_cq *cq;
1019 int rc, entries;
1020
1021 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1022 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1023 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1024 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1025 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1026 return ERR_PTR(-EINVAL);
1027
1028 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1029 if (!qp)
1030 return ERR_PTR(-ENOMEM);
1031
1032 qp->rdev = rdev;
1033 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1034 qp->qplib_qp.pd = &pd->qplib_pd;
1035 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1036 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1037 if (qp->qplib_qp.type == IB_QPT_MAX) {
1038 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1039 qp->qplib_qp.type);
1040 rc = -EINVAL;
1041 goto fail;
1042 }
1043 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1044 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1045 IB_SIGNAL_ALL_WR) ? true : false);
1046
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001047 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1048 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1049 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1050
1051 if (qp_init_attr->send_cq) {
1052 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1053 ib_cq);
1054 if (!cq) {
1055 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1056 rc = -EINVAL;
1057 goto fail;
1058 }
1059 qp->qplib_qp.scq = &cq->qplib_cq;
1060 }
1061
1062 if (qp_init_attr->recv_cq) {
1063 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1064 ib_cq);
1065 if (!cq) {
1066 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1067 rc = -EINVAL;
1068 goto fail;
1069 }
1070 qp->qplib_qp.rcq = &cq->qplib_cq;
1071 }
1072
1073 if (qp_init_attr->srq) {
1074 dev_err(rdev_to_dev(rdev), "SRQ not supported");
1075 rc = -ENOTSUPP;
1076 goto fail;
1077 } else {
1078 /* Allocate 1 more than what's provided so posting max doesn't
1079 * mean empty
1080 */
1081 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1082 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1083 dev_attr->max_qp_wqes + 1);
1084
Eddie Wai9152e0b2017-06-14 03:26:23 -07001085 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1086 qp_init_attr->cap.max_recv_wr;
1087
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001088 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1089 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1090 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1091 }
1092
1093 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1094
1095 if (qp_init_attr->qp_type == IB_QPT_GSI) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07001096 /* Allocate 1 more than what's provided */
1097 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1098 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1099 dev_attr->max_qp_wqes + 1);
1100 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1101 qp_init_attr->cap.max_send_wr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001102 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1103 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1104 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1105 qp->qplib_qp.sq.max_sge++;
1106 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1107 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1108
1109 qp->qplib_qp.rq_hdr_buf_size =
1110 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1111
1112 qp->qplib_qp.sq_hdr_buf_size =
1113 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1114 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1115 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1116 if (rc) {
1117 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1118 goto fail;
1119 }
1120 /* Create a shadow QP to handle the QP1 traffic */
1121 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1122 &qp->qplib_qp);
1123 if (!rdev->qp1_sqp) {
1124 rc = -EINVAL;
1125 dev_err(rdev_to_dev(rdev),
1126 "Failed to create Shadow QP for QP1");
1127 goto qp_destroy;
1128 }
1129 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1130 &qp->qplib_qp);
1131 if (!rdev->sqp_ah) {
1132 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1133 &rdev->qp1_sqp->qplib_qp);
1134 rc = -EINVAL;
1135 dev_err(rdev_to_dev(rdev),
1136 "Failed to create AH entry for ShadowQP");
1137 goto qp_destroy;
1138 }
1139
1140 } else {
Eddie Wai9152e0b2017-06-14 03:26:23 -07001141 /* Allocate 128 + 1 more than what's provided */
1142 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1143 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1144 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1145 dev_attr->max_qp_wqes +
1146 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1147 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1148
1149 /*
1150 * Reserving one slot for Phantom WQE. Application can
1151 * post one extra entry in this case. But allowing this to avoid
1152 * unexpected Queue full condition
1153 */
1154
1155 qp->qplib_qp.sq.q_full_delta -= 1;
1156
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001157 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1158 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1159 if (udata) {
1160 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1161 if (rc)
1162 goto fail;
1163 } else {
1164 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1165 }
1166
1167 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1168 if (rc) {
1169 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1170 goto fail;
1171 }
1172 }
1173
1174 qp->ib_qp.qp_num = qp->qplib_qp.id;
1175 spin_lock_init(&qp->sq_lock);
Devesh Sharma018cf592017-05-22 03:15:40 -07001176 spin_lock_init(&qp->rq_lock);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001177
1178 if (udata) {
1179 struct bnxt_re_qp_resp resp;
1180
1181 resp.qpid = qp->ib_qp.qp_num;
1182 resp.rsvd = 0;
1183 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1184 if (rc) {
1185 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1186 goto qp_destroy;
1187 }
1188 }
1189 INIT_LIST_HEAD(&qp->list);
1190 mutex_lock(&rdev->qp_lock);
1191 list_add_tail(&qp->list, &rdev->qp_list);
1192 atomic_inc(&rdev->qp_count);
1193 mutex_unlock(&rdev->qp_lock);
1194
1195 return &qp->ib_qp;
1196qp_destroy:
1197 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1198fail:
1199 kfree(qp);
1200 return ERR_PTR(rc);
1201}
1202
1203static u8 __from_ib_qp_state(enum ib_qp_state state)
1204{
1205 switch (state) {
1206 case IB_QPS_RESET:
1207 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1208 case IB_QPS_INIT:
1209 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1210 case IB_QPS_RTR:
1211 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1212 case IB_QPS_RTS:
1213 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1214 case IB_QPS_SQD:
1215 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1216 case IB_QPS_SQE:
1217 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1218 case IB_QPS_ERR:
1219 default:
1220 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1221 }
1222}
1223
1224static enum ib_qp_state __to_ib_qp_state(u8 state)
1225{
1226 switch (state) {
1227 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1228 return IB_QPS_RESET;
1229 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1230 return IB_QPS_INIT;
1231 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1232 return IB_QPS_RTR;
1233 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1234 return IB_QPS_RTS;
1235 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1236 return IB_QPS_SQD;
1237 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1238 return IB_QPS_SQE;
1239 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1240 default:
1241 return IB_QPS_ERR;
1242 }
1243}
1244
1245static u32 __from_ib_mtu(enum ib_mtu mtu)
1246{
1247 switch (mtu) {
1248 case IB_MTU_256:
1249 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1250 case IB_MTU_512:
1251 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1252 case IB_MTU_1024:
1253 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1254 case IB_MTU_2048:
1255 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1256 case IB_MTU_4096:
1257 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1258 default:
1259 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1260 }
1261}
1262
1263static enum ib_mtu __to_ib_mtu(u32 mtu)
1264{
1265 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1266 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1267 return IB_MTU_256;
1268 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1269 return IB_MTU_512;
1270 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1271 return IB_MTU_1024;
1272 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1273 return IB_MTU_2048;
1274 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1275 return IB_MTU_4096;
1276 default:
1277 return IB_MTU_2048;
1278 }
1279}
1280
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001281static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1282 struct bnxt_re_qp *qp1_qp,
1283 int qp_attr_mask)
1284{
1285 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1286 int rc = 0;
1287
1288 if (qp_attr_mask & IB_QP_STATE) {
1289 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1290 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1291 }
1292 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1293 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1294 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1295 }
1296
1297 if (qp_attr_mask & IB_QP_QKEY) {
1298 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1299 /* Using a Random QKEY */
1300 qp->qplib_qp.qkey = 0x81818181;
1301 }
1302 if (qp_attr_mask & IB_QP_SQ_PSN) {
1303 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1304 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1305 }
1306
1307 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1308 if (rc)
1309 dev_err(rdev_to_dev(rdev),
1310 "Failed to modify Shadow QP for QP1");
1311 return rc;
1312}
1313
1314int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1315 int qp_attr_mask, struct ib_udata *udata)
1316{
1317 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1318 struct bnxt_re_dev *rdev = qp->rdev;
1319 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1320 enum ib_qp_state curr_qp_state, new_qp_state;
1321 int rc, entries;
1322 int status;
1323 union ib_gid sgid;
1324 struct ib_gid_attr sgid_attr;
1325 u8 nw_type;
1326
1327 qp->qplib_qp.modify_flags = 0;
1328 if (qp_attr_mask & IB_QP_STATE) {
1329 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1330 new_qp_state = qp_attr->qp_state;
1331 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1332 ib_qp->qp_type, qp_attr_mask,
1333 IB_LINK_LAYER_ETHERNET)) {
1334 dev_err(rdev_to_dev(rdev),
1335 "Invalid attribute mask: %#x specified ",
1336 qp_attr_mask);
1337 dev_err(rdev_to_dev(rdev),
1338 "for qpn: %#x type: %#x",
1339 ib_qp->qp_num, ib_qp->qp_type);
1340 dev_err(rdev_to_dev(rdev),
1341 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1342 curr_qp_state, new_qp_state);
1343 return -EINVAL;
1344 }
1345 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1346 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
Selvin Xavierf218d672017-06-29 12:28:15 -07001347
1348 if (!qp->sumem &&
1349 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1350 dev_dbg(rdev_to_dev(rdev),
1351 "Move QP = %p to flush list\n",
1352 qp);
1353 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1354 }
1355 if (!qp->sumem &&
1356 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1357 dev_dbg(rdev_to_dev(rdev),
1358 "Move QP = %p out of flush list\n",
1359 qp);
1360 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
1361 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001362 }
1363 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1364 qp->qplib_qp.modify_flags |=
1365 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1366 qp->qplib_qp.en_sqd_async_notify = true;
1367 }
1368 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1369 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1370 qp->qplib_qp.access =
1371 __from_ib_access_flags(qp_attr->qp_access_flags);
1372 /* LOCAL_WRITE access must be set to allow RC receive */
1373 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1374 }
1375 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1376 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1377 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1378 }
1379 if (qp_attr_mask & IB_QP_QKEY) {
1380 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1381 qp->qplib_qp.qkey = qp_attr->qkey;
1382 }
1383 if (qp_attr_mask & IB_QP_AV) {
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001384 const struct ib_global_route *grh =
1385 rdma_ah_read_grh(&qp_attr->ah_attr);
1386
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001387 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1388 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1389 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1390 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1391 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1392 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1393 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001394 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001395 sizeof(qp->qplib_qp.ah.dgid.data));
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001396 qp->qplib_qp.ah.flow_label = grh->flow_label;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001397 /* If RoCE V2 is enabled, stack will have two entries for
1398 * each GID entry. Avoiding this duplicte entry in HW. Dividing
1399 * the GID index by 2 for RoCE V2
1400 */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001401 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1402 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1403 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1404 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1405 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001406 ether_addr_copy(qp->qplib_qp.ah.dmac,
1407 qp_attr->ah_attr.roce.dmac);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001408
1409 status = ib_get_cached_gid(&rdev->ibdev, 1,
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001410 grh->sgid_index,
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001411 &sgid, &sgid_attr);
1412 if (!status && sgid_attr.ndev) {
1413 memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1414 ETH_ALEN);
1415 dev_put(sgid_attr.ndev);
1416 nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1417 &sgid);
1418 switch (nw_type) {
1419 case RDMA_NETWORK_IPV4:
1420 qp->qplib_qp.nw_type =
1421 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1422 break;
1423 case RDMA_NETWORK_IPV6:
1424 qp->qplib_qp.nw_type =
1425 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1426 break;
1427 default:
1428 qp->qplib_qp.nw_type =
1429 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1430 break;
1431 }
1432 }
1433 }
1434
1435 if (qp_attr_mask & IB_QP_PATH_MTU) {
1436 qp->qplib_qp.modify_flags |=
1437 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1438 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
Devesh Sharmaf9b941b2017-08-31 09:27:28 +05301439 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001440 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1441 qp->qplib_qp.modify_flags |=
1442 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1443 qp->qplib_qp.path_mtu =
1444 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
Devesh Sharmaf9b941b2017-08-31 09:27:28 +05301445 qp->qplib_qp.mtu =
1446 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001447 }
1448
1449 if (qp_attr_mask & IB_QP_TIMEOUT) {
1450 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1451 qp->qplib_qp.timeout = qp_attr->timeout;
1452 }
1453 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1454 qp->qplib_qp.modify_flags |=
1455 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1456 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1457 }
1458 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1459 qp->qplib_qp.modify_flags |=
1460 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1461 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1462 }
1463 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1464 qp->qplib_qp.modify_flags |=
1465 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1466 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1467 }
1468 if (qp_attr_mask & IB_QP_RQ_PSN) {
1469 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1470 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1471 }
1472 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1473 qp->qplib_qp.modify_flags |=
1474 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
Eddie Waia25d1122017-06-29 12:28:13 -07001475 /* Cap the max_rd_atomic to device max */
1476 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1477 dev_attr->max_qp_rd_atom);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001478 }
1479 if (qp_attr_mask & IB_QP_SQ_PSN) {
1480 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1481 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1482 }
1483 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
Eddie Waia25d1122017-06-29 12:28:13 -07001484 if (qp_attr->max_dest_rd_atomic >
1485 dev_attr->max_qp_init_rd_atom) {
1486 dev_err(rdev_to_dev(rdev),
1487 "max_dest_rd_atomic requested%d is > dev_max%d",
1488 qp_attr->max_dest_rd_atomic,
1489 dev_attr->max_qp_init_rd_atom);
1490 return -EINVAL;
1491 }
1492
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001493 qp->qplib_qp.modify_flags |=
1494 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1495 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1496 }
1497 if (qp_attr_mask & IB_QP_CAP) {
1498 qp->qplib_qp.modify_flags |=
1499 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1500 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1501 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1502 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1503 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1504 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1505 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1506 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1507 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1508 (qp_attr->cap.max_inline_data >=
1509 dev_attr->max_inline_data)) {
1510 dev_err(rdev_to_dev(rdev),
1511 "Create QP failed - max exceeded");
1512 return -EINVAL;
1513 }
1514 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1515 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1516 dev_attr->max_qp_wqes + 1);
Eddie Wai9152e0b2017-06-14 03:26:23 -07001517 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1518 qp_attr->cap.max_send_wr;
1519 /*
1520 * Reserving one slot for Phantom WQE. Some application can
1521 * post one extra entry in this case. Allowing this to avoid
1522 * unexpected Queue full condition
1523 */
1524 qp->qplib_qp.sq.q_full_delta -= 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001525 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1526 if (qp->qplib_qp.rq.max_wqe) {
1527 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1528 qp->qplib_qp.rq.max_wqe =
1529 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
Eddie Wai9152e0b2017-06-14 03:26:23 -07001530 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1531 qp_attr->cap.max_recv_wr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001532 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1533 } else {
1534 /* SRQ was used prior, just ignore the RQ caps */
1535 }
1536 }
1537 if (qp_attr_mask & IB_QP_DEST_QPN) {
1538 qp->qplib_qp.modify_flags |=
1539 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1540 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1541 }
1542 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1543 if (rc) {
1544 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1545 return rc;
1546 }
1547 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1548 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1549 return rc;
1550}
1551
1552int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1553 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1554{
1555 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1556 struct bnxt_re_dev *rdev = qp->rdev;
Leon Romanovskye13547b2017-09-19 13:22:13 +03001557 struct bnxt_qplib_qp *qplib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001558 int rc;
1559
Leon Romanovskye13547b2017-09-19 13:22:13 +03001560 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1561 if (!qplib_qp)
1562 return -ENOMEM;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001563
Leon Romanovskye13547b2017-09-19 13:22:13 +03001564 qplib_qp->id = qp->qplib_qp.id;
1565 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1566
1567 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001568 if (rc) {
1569 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
Leon Romanovskye13547b2017-09-19 13:22:13 +03001570 goto out;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001571 }
Leon Romanovskye13547b2017-09-19 13:22:13 +03001572 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1573 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1574 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1575 qp_attr->pkey_index = qplib_qp->pkey_index;
1576 qp_attr->qkey = qplib_qp->qkey;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001577 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Leon Romanovskye13547b2017-09-19 13:22:13 +03001578 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1579 qplib_qp->ah.host_sgid_index,
1580 qplib_qp->ah.hop_limit,
1581 qplib_qp->ah.traffic_class);
1582 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1583 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1584 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1585 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1586 qp_attr->timeout = qplib_qp->timeout;
1587 qp_attr->retry_cnt = qplib_qp->retry_cnt;
1588 qp_attr->rnr_retry = qplib_qp->rnr_retry;
1589 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1590 qp_attr->rq_psn = qplib_qp->rq.psn;
1591 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1592 qp_attr->sq_psn = qplib_qp->sq.psn;
1593 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1594 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1595 IB_SIGNAL_REQ_WR;
1596 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001597
1598 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1599 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1600 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1601 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1602 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1603 qp_init_attr->cap = qp_attr->cap;
1604
Leon Romanovskye13547b2017-09-19 13:22:13 +03001605out:
1606 kfree(qplib_qp);
1607 return rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001608}
1609
1610/* Routine for sending QP1 packets for RoCE V1 and V2
1611 */
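/*
 * The header (ETH [+ VLAN] [+ GRH or IP] [+ UDP] + BTH + DETH) is built in
 * software into qp->qp1_hdr, packed into the QP1 SQ header buffer and
 * prepended to the WQE as an extra SGE; the caller's SGEs are shifted up
 * by one slot to make room for it.
 */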
1612static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1613 struct ib_send_wr *wr,
1614 struct bnxt_qplib_swqe *wqe,
1615 int payload_size)
1616{
1617 struct ib_device *ibdev = &qp->rdev->ibdev;
1618 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1619 ib_ah);
1620 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1621 struct bnxt_qplib_sge sge;
1622 union ib_gid sgid;
1623 u8 nw_type;
1624 u16 ether_type;
1625 struct ib_gid_attr sgid_attr;
1626 union ib_gid dgid;
1627 bool is_eth = false;
1628 bool is_vlan = false;
1629 bool is_grh = false;
1630 bool is_udp = false;
1631 u8 ip_version = 0;
1632 u16 vlan_id = 0xFFFF;
1633 void *buf;
1634 int i, rc = 0, size;
1635
1636 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1637
1638 rc = ib_get_cached_gid(ibdev, 1,
1639 qplib_ah->host_sgid_index, &sgid,
1640 &sgid_attr);
1641 if (rc) {
1642 dev_err(rdev_to_dev(qp->rdev),
1643 "Failed to query gid at index %d",
1644 qplib_ah->host_sgid_index);
1645 return rc;
1646 }
1647 if (sgid_attr.ndev) {
1648 if (is_vlan_dev(sgid_attr.ndev))
1649 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1650 dev_put(sgid_attr.ndev);
1651 }
1652 /* Get network header type for this GID */
1653 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1654 switch (nw_type) {
1655 case RDMA_NETWORK_IPV4:
1656 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1657 break;
1658 case RDMA_NETWORK_IPV6:
1659 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1660 break;
1661 default:
1662 nw_type = BNXT_RE_ROCE_V1_PACKET;
1663 break;
1664 }
1665 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1666 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1667 if (is_udp) {
1668 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1669 ip_version = 4;
1670 ether_type = ETH_P_IP;
1671 } else {
1672 ip_version = 6;
1673 ether_type = ETH_P_IPV6;
1674 }
1675 is_grh = false;
1676 } else {
1677 ether_type = ETH_P_IBOE;
1678 is_grh = true;
1679 }
1680
1681 is_eth = true;
1682 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1683
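	/* ib_ud_header_init() picks the header layout from the flags above:
	 * ETH (plus VLAN if tagged), then either a GRH for RoCE v1 or an
	 * IPv4/IPv6 header plus UDP for RoCE v2, followed by BTH and DETH.
	 */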
1684 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1685 ip_version, is_udp, 0, &qp->qp1_hdr);
1686
1687 /* ETH */
1688 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1689 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1690
1691 /* For vlan, check the sgid for vlan existence */
1692
1693 if (!is_vlan) {
1694 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1695 } else {
1696 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1697 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1698 }
1699
1700 if (is_grh || (ip_version == 6)) {
1701 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1702 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1703 sizeof(sgid));
1704 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1705 }
1706
1707 if (ip_version == 4) {
1708 qp->qp1_hdr.ip4.tos = 0;
1709 qp->qp1_hdr.ip4.id = 0;
1710 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1711 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1712
1713 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1714 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1715 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1716 }
1717
1718 if (is_udp) {
1719 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1720 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1721 qp->qp1_hdr.udp.csum = 0;
1722 }
1723
1724 /* BTH */
1725 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1726 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1727 qp->qp1_hdr.immediate_present = 1;
1728 } else {
1729 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1730 }
1731 if (wr->send_flags & IB_SEND_SOLICITED)
1732 qp->qp1_hdr.bth.solicited_event = 1;
1733 /* pad_count */
1734 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
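	/* e.g. a 37-byte payload needs (4 - 37) & 3 == 3 pad bytes to reach a
	 * 4-byte multiple.
	 */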
1735
1736 /* P_key for QP1 is for all members */
1737 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1738 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1739 qp->qp1_hdr.bth.ack_req = 0;
1740 qp->send_psn++;
1741 qp->send_psn &= BTH_PSN_MASK;
1742 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1743 /* DETH */
 1744 	/* Use the privileged Q_Key for QP1 */
1745 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1746 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1747
1748 /* Pack the QP1 to the transmit buffer */
1749 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1750 if (buf) {
1751 size = ib_ud_header_pack(&qp->qp1_hdr, buf);
1752 for (i = wqe->num_sge; i; i--) {
1753 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1754 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1755 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1756 }
1757
1758 /*
1759 * Max Header buf size for IPV6 RoCE V2 is 86,
 1760 		 * which is the same as the QP1 SQ header buffer.
 1761 		 * Header buf size for IPV4 RoCE V2 can be 66:
 1762 		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
1763 * Subtract 20 bytes from QP1 SQ header buf size
1764 */
1765 if (is_udp && ip_version == 4)
1766 sge.size -= 20;
1767 /*
1768 * Max Header buf size for RoCE V1 is 78.
1769 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1770 * Subtract 8 bytes from QP1 SQ header buf size
1771 */
1772 if (!is_udp)
1773 sge.size -= 8;
1774
1775 /* Subtract 4 bytes for non vlan packets */
1776 if (!is_vlan)
1777 sge.size -= 4;
1778
1779 wqe->sg_list[0].addr = sge.addr;
1780 wqe->sg_list[0].lkey = sge.lkey;
1781 wqe->sg_list[0].size = sge.size;
1782 wqe->num_sge++;
1783
1784 } else {
1785 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1786 rc = -ENOMEM;
1787 }
1788 return rc;
1789}
1790
1791/* The MAD layer only provides a recv SGE large enough for ib_grh plus the
 1792 * MAD datagram: no Ethernet headers, Ethertype, BTH, DETH, nor RoCE iCRC.
 1793 * The Cu+ solution must therefore provide a buffer for the entire receive
 1794 * packet (334 bytes, no VLAN) and then copy the GRH and the MAD datagram
 1795 * out to the provided SGE.
1796 */
1797static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1798 struct ib_recv_wr *wr,
1799 struct bnxt_qplib_swqe *wqe,
1800 int payload_size)
1801{
1802 struct bnxt_qplib_sge ref, sge;
1803 u32 rq_prod_index;
1804 struct bnxt_re_sqp_entries *sqp_entry;
1805
1806 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1807
1808 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1809 return -ENOMEM;
1810
1811 /* Create 1 SGE to receive the entire
1812 * ethernet packet
1813 */
1814 /* Save the reference from ULP */
1815 ref.addr = wqe->sg_list[0].addr;
1816 ref.lkey = wqe->sg_list[0].lkey;
1817 ref.size = wqe->sg_list[0].size;
1818
1819 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1820
1821 /* SGE 1 */
1822 wqe->sg_list[0].addr = sge.addr;
1823 wqe->sg_list[0].lkey = sge.lkey;
1824 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1825 sge.size -= wqe->sg_list[0].size;
1826
1827 sqp_entry->sge.addr = ref.addr;
1828 sqp_entry->sge.lkey = ref.lkey;
1829 sqp_entry->sge.size = ref.size;
1830 /* Store the wrid for reporting completion */
1831 sqp_entry->wrid = wqe->wr_id;
1832 /* change the wqe->wrid to table index */
1833 wqe->wr_id = rq_prod_index;
1834 return 0;
1835}
1836
1837static int is_ud_qp(struct bnxt_re_qp *qp)
1838{
1839 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1840}
1841
1842static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1843 struct ib_send_wr *wr,
1844 struct bnxt_qplib_swqe *wqe)
1845{
1846 struct bnxt_re_ah *ah = NULL;
1847
1848 if (is_ud_qp(qp)) {
1849 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1850 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1851 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1852 wqe->send.avid = ah->qplib_ah.id;
1853 }
1854 switch (wr->opcode) {
1855 case IB_WR_SEND:
1856 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1857 break;
1858 case IB_WR_SEND_WITH_IMM:
1859 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1860 wqe->send.imm_data = wr->ex.imm_data;
1861 break;
1862 case IB_WR_SEND_WITH_INV:
1863 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1864 wqe->send.inv_key = wr->ex.invalidate_rkey;
1865 break;
1866 default:
1867 return -EINVAL;
1868 }
1869 if (wr->send_flags & IB_SEND_SIGNALED)
1870 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1871 if (wr->send_flags & IB_SEND_FENCE)
1872 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1873 if (wr->send_flags & IB_SEND_SOLICITED)
1874 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1875 if (wr->send_flags & IB_SEND_INLINE)
1876 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1877
1878 return 0;
1879}
1880
1881static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1882 struct bnxt_qplib_swqe *wqe)
1883{
1884 switch (wr->opcode) {
1885 case IB_WR_RDMA_WRITE:
1886 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1887 break;
1888 case IB_WR_RDMA_WRITE_WITH_IMM:
1889 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1890 wqe->rdma.imm_data = wr->ex.imm_data;
1891 break;
1892 case IB_WR_RDMA_READ:
1893 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1894 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1895 break;
1896 default:
1897 return -EINVAL;
1898 }
1899 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1900 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1901 if (wr->send_flags & IB_SEND_SIGNALED)
1902 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1903 if (wr->send_flags & IB_SEND_FENCE)
1904 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1905 if (wr->send_flags & IB_SEND_SOLICITED)
1906 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1907 if (wr->send_flags & IB_SEND_INLINE)
1908 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1909
1910 return 0;
1911}
1912
1913static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1914 struct bnxt_qplib_swqe *wqe)
1915{
1916 switch (wr->opcode) {
1917 case IB_WR_ATOMIC_CMP_AND_SWP:
1918 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
Devesh Sharma55311d02017-08-31 09:27:30 +05301919 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001920 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1921 break;
1922 case IB_WR_ATOMIC_FETCH_AND_ADD:
1923 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1924 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1925 break;
1926 default:
1927 return -EINVAL;
1928 }
1929 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1930 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1931 if (wr->send_flags & IB_SEND_SIGNALED)
1932 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1933 if (wr->send_flags & IB_SEND_FENCE)
1934 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1935 if (wr->send_flags & IB_SEND_SOLICITED)
1936 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1937 return 0;
1938}
1939
1940static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1941 struct bnxt_qplib_swqe *wqe)
1942{
1943 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1944 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1945
1946 if (wr->send_flags & IB_SEND_SIGNALED)
1947 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1948 if (wr->send_flags & IB_SEND_FENCE)
1949 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1950 if (wr->send_flags & IB_SEND_SOLICITED)
1951 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1952
1953 return 0;
1954}
1955
1956static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1957 struct bnxt_qplib_swqe *wqe)
1958{
1959 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1960 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1961 int access = wr->access;
1962
1963 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1964 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1965 wqe->frmr.page_list = mr->pages;
1966 wqe->frmr.page_list_len = mr->npages;
1967 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1968 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1969
1970 if (wr->wr.send_flags & IB_SEND_FENCE)
1971 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1972 if (wr->wr.send_flags & IB_SEND_SIGNALED)
1973 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1974
1975 if (access & IB_ACCESS_LOCAL_WRITE)
1976 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1977 if (access & IB_ACCESS_REMOTE_READ)
1978 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
1979 if (access & IB_ACCESS_REMOTE_WRITE)
1980 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
1981 if (access & IB_ACCESS_REMOTE_ATOMIC)
1982 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
1983 if (access & IB_ACCESS_MW_BIND)
1984 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
1985
1986 wqe->frmr.l_key = wr->key;
1987 wqe->frmr.length = wr->mr->length;
1988 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
1989 wqe->frmr.va = wr->mr->iova;
1990 return 0;
1991}
1992
1993static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
1994 struct ib_send_wr *wr,
1995 struct bnxt_qplib_swqe *wqe)
1996{
1997 /* Copy the inline data to the data field */
1998 u8 *in_data;
1999 u32 i, sge_len;
2000 void *sge_addr;
2001
2002 in_data = wqe->inline_data;
2003 for (i = 0; i < wr->num_sge; i++) {
2004 sge_addr = (void *)(unsigned long)
2005 wr->sg_list[i].addr;
2006 sge_len = wr->sg_list[i].length;
2007
2008 if ((sge_len + wqe->inline_len) >
2009 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2010 dev_err(rdev_to_dev(rdev),
2011 "Inline data size requested > supported value");
2012 return -EINVAL;
2013 }
2014 sge_len = wr->sg_list[i].length;
2015
2016 memcpy(in_data, sge_addr, sge_len);
2017 in_data += wr->sg_list[i].length;
2018 wqe->inline_len += wr->sg_list[i].length;
2019 }
2020 return wqe->inline_len;
2021}
2022
2023static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2024 struct ib_send_wr *wr,
2025 struct bnxt_qplib_swqe *wqe)
2026{
2027 int payload_sz = 0;
2028
2029 if (wr->send_flags & IB_SEND_INLINE)
2030 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2031 else
2032 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2033 wqe->num_sge);
2034
2035 return payload_sz;
2036}
2037
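/*
 * HW stall workaround for UD, GSI and raw-Ethertype QPs: once wqe_cnt hits
 * BNXT_RE_UD_QP_HW_STALL, re-issue a modify-QP to RTS (presumably to nudge
 * the HW/firmware pipeline) and reset the counter.
 */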
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002038static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2039{
2040 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2041 qp->ib_qp.qp_type == IB_QPT_GSI ||
2042 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2043 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2044 int qp_attr_mask;
2045 struct ib_qp_attr qp_attr;
2046
2047 qp_attr_mask = IB_QP_STATE;
2048 qp_attr.qp_state = IB_QPS_RTS;
2049 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2050 qp->qplib_qp.wqe_cnt = 0;
2051 }
2052}
2053
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002054static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2055 struct bnxt_re_qp *qp,
2056 struct ib_send_wr *wr)
2057{
2058 struct bnxt_qplib_swqe wqe;
2059 int rc = 0, payload_sz = 0;
2060 unsigned long flags;
2061
2062 spin_lock_irqsave(&qp->sq_lock, flags);
2063 memset(&wqe, 0, sizeof(wqe));
2064 while (wr) {
2065 /* House keeping */
2066 memset(&wqe, 0, sizeof(wqe));
2067
2068 /* Common */
2069 wqe.num_sge = wr->num_sge;
2070 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2071 dev_err(rdev_to_dev(rdev),
2072 "Limit exceeded for Send SGEs");
2073 rc = -EINVAL;
2074 goto bad;
2075 }
2076
2077 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2078 if (payload_sz < 0) {
2079 rc = -EINVAL;
2080 goto bad;
2081 }
2082 wqe.wr_id = wr->wr_id;
2083
2084 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2085
2086 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2087 if (!rc)
2088 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2089bad:
2090 if (rc) {
2091 dev_err(rdev_to_dev(rdev),
2092 "Post send failed opcode = %#x rc = %d",
2093 wr->opcode, rc);
2094 break;
2095 }
2096 wr = wr->next;
2097 }
2098 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002099 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002100 spin_unlock_irqrestore(&qp->sq_lock, flags);
2101 return rc;
2102}
2103
2104int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2105 struct ib_send_wr **bad_wr)
2106{
2107 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2108 struct bnxt_qplib_swqe wqe;
2109 int rc = 0, payload_sz = 0;
2110 unsigned long flags;
2111
2112 spin_lock_irqsave(&qp->sq_lock, flags);
2113 while (wr) {
2114 /* House keeping */
2115 memset(&wqe, 0, sizeof(wqe));
2116
2117 /* Common */
2118 wqe.num_sge = wr->num_sge;
2119 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2120 dev_err(rdev_to_dev(qp->rdev),
2121 "Limit exceeded for Send SGEs");
2122 rc = -EINVAL;
2123 goto bad;
2124 }
2125
2126 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2127 if (payload_sz < 0) {
2128 rc = -EINVAL;
2129 goto bad;
2130 }
2131 wqe.wr_id = wr->wr_id;
2132
2133 switch (wr->opcode) {
2134 case IB_WR_SEND:
2135 case IB_WR_SEND_WITH_IMM:
2136 if (ib_qp->qp_type == IB_QPT_GSI) {
2137 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2138 payload_sz);
2139 if (rc)
2140 goto bad;
2141 wqe.rawqp1.lflags |=
2142 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2143 }
 2144 			/* send_flags is a bitmask; test the CSUM bit rather
 2145 			 * than switching on the whole mask, which would miss
 2146 			 * combined flags.
 2147 			 */
 2148 			if (wr->send_flags & IB_SEND_IP_CSUM)
 2149 				wqe.rawqp1.lflags |=
 2150 					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2152 /* Fall thru to build the wqe */
2153 case IB_WR_SEND_WITH_INV:
2154 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2155 break;
2156 case IB_WR_RDMA_WRITE:
2157 case IB_WR_RDMA_WRITE_WITH_IMM:
2158 case IB_WR_RDMA_READ:
2159 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2160 break;
2161 case IB_WR_ATOMIC_CMP_AND_SWP:
2162 case IB_WR_ATOMIC_FETCH_AND_ADD:
2163 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2164 break;
2165 case IB_WR_RDMA_READ_WITH_INV:
2166 dev_err(rdev_to_dev(qp->rdev),
2167 "RDMA Read with Invalidate is not supported");
2168 rc = -EINVAL;
2169 goto bad;
2170 case IB_WR_LOCAL_INV:
2171 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2172 break;
2173 case IB_WR_REG_MR:
2174 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2175 break;
2176 default:
2177 /* Unsupported WRs */
2178 dev_err(rdev_to_dev(qp->rdev),
2179 "WR (%#x) is not supported", wr->opcode);
2180 rc = -EINVAL;
2181 goto bad;
2182 }
2183 if (!rc)
2184 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2185bad:
2186 if (rc) {
2187 dev_err(rdev_to_dev(qp->rdev),
2188 "post_send failed op:%#x qps = %#x rc = %d\n",
2189 wr->opcode, qp->qplib_qp.state, rc);
2190 *bad_wr = wr;
2191 break;
2192 }
2193 wr = wr->next;
2194 }
2195 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002196 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002197 spin_unlock_irqrestore(&qp->sq_lock, flags);
2198
2199 return rc;
2200}
2201
2202static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2203 struct bnxt_re_qp *qp,
2204 struct ib_recv_wr *wr)
2205{
2206 struct bnxt_qplib_swqe wqe;
2207 int rc = 0, payload_sz = 0;
2208
2209 memset(&wqe, 0, sizeof(wqe));
2210 while (wr) {
2211 /* House keeping */
2212 memset(&wqe, 0, sizeof(wqe));
2213
2214 /* Common */
2215 wqe.num_sge = wr->num_sge;
2216 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2217 dev_err(rdev_to_dev(rdev),
2218 "Limit exceeded for Receive SGEs");
2219 rc = -EINVAL;
2220 break;
2221 }
2222 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2223 wr->num_sge);
2224 wqe.wr_id = wr->wr_id;
2225 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2226
2227 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2228 if (rc)
2229 break;
2230
2231 wr = wr->next;
2232 }
2233 if (!rc)
2234 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2235 return rc;
2236}
2237
2238int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2239 struct ib_recv_wr **bad_wr)
2240{
2241 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2242 struct bnxt_qplib_swqe wqe;
2243 int rc = 0, payload_sz = 0;
Devesh Sharma018cf592017-05-22 03:15:40 -07002244 unsigned long flags;
2245 u32 count = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002246
Devesh Sharma018cf592017-05-22 03:15:40 -07002247 spin_lock_irqsave(&qp->rq_lock, flags);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002248 while (wr) {
2249 /* House keeping */
2250 memset(&wqe, 0, sizeof(wqe));
2251
2252 /* Common */
2253 wqe.num_sge = wr->num_sge;
2254 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2255 dev_err(rdev_to_dev(qp->rdev),
2256 "Limit exceeded for Receive SGEs");
2257 rc = -EINVAL;
2258 *bad_wr = wr;
2259 break;
2260 }
2261
2262 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2263 wr->num_sge);
2264 wqe.wr_id = wr->wr_id;
2265 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2266
2267 if (ib_qp->qp_type == IB_QPT_GSI)
2268 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2269 payload_sz);
2270 if (!rc)
2271 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2272 if (rc) {
2273 *bad_wr = wr;
2274 break;
2275 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002276
 2277 		/* Ring the DB once the number of RQEs posted reaches a threshold */
2278 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2279 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2280 count = 0;
2281 }
2282
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002283 wr = wr->next;
2284 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002285
2286 if (count)
2287 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2288
2289 spin_unlock_irqrestore(&qp->rq_lock, flags);
2290
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002291 return rc;
2292}
2293
2294/* Completion Queues */
2295int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2296{
2297 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2298 struct bnxt_re_dev *rdev = cq->rdev;
2299 int rc;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002300 struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002301
2302 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2303 if (rc) {
2304 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2305 return rc;
2306 }
Doug Ledford374cb862017-04-25 14:00:59 -04002307 if (!IS_ERR_OR_NULL(cq->umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002308 ib_umem_release(cq->umem);
2309
2310 if (cq) {
2311 kfree(cq->cql);
2312 kfree(cq);
2313 }
2314 atomic_dec(&rdev->cq_count);
Selvin Xavier6a5df912017-08-02 01:46:18 -07002315 nq->budget--;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002316 return 0;
2317}
2318
2319struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2320 const struct ib_cq_init_attr *attr,
2321 struct ib_ucontext *context,
2322 struct ib_udata *udata)
2323{
2324 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2325 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2326 struct bnxt_re_cq *cq = NULL;
2327 int rc, entries;
2328 int cqe = attr->cqe;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002329 struct bnxt_qplib_nq *nq = NULL;
2330 unsigned int nq_alloc_cnt;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002331
2332 /* Validate CQ fields */
2333 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2334 dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
2335 return ERR_PTR(-EINVAL);
2336 }
2337 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2338 if (!cq)
2339 return ERR_PTR(-ENOMEM);
2340
2341 cq->rdev = rdev;
2342 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2343
2344 entries = roundup_pow_of_two(cqe + 1);
2345 if (entries > dev_attr->max_cq_wqes + 1)
2346 entries = dev_attr->max_cq_wqes + 1;
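	/* e.g. a request for 1000 CQEs is rounded up to a 1024-entry ring,
	 * subject to the max_cq_wqes + 1 clamp above.
	 */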
2347
2348 if (context) {
2349 struct bnxt_re_cq_req req;
2350 struct bnxt_re_ucontext *uctx = container_of
2351 (context,
2352 struct bnxt_re_ucontext,
2353 ib_uctx);
2354 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2355 rc = -EFAULT;
2356 goto fail;
2357 }
2358
2359 cq->umem = ib_umem_get(context, req.cq_va,
2360 entries * sizeof(struct cq_base),
2361 IB_ACCESS_LOCAL_WRITE, 1);
2362 if (IS_ERR(cq->umem)) {
2363 rc = PTR_ERR(cq->umem);
2364 goto fail;
2365 }
2366 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2367 cq->qplib_cq.nmap = cq->umem->nmap;
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07002368 cq->qplib_cq.dpi = &uctx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002369 } else {
2370 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2371 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2372 GFP_KERNEL);
2373 if (!cq->cql) {
2374 rc = -ENOMEM;
2375 goto fail;
2376 }
2377
2378 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2379 cq->qplib_cq.sghead = NULL;
2380 cq->qplib_cq.nmap = 0;
2381 }
Selvin Xavier6a5df912017-08-02 01:46:18 -07002382 /*
 2383 	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
 2384 	 * used for getting the NQ index.
2385 */
2386 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2387 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
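	/* e.g. with rdev->num_msix == 5, successive CQs land on nq[0], nq[1],
	 * nq[2], nq[3], nq[0], ... as nq_alloc_cnt increments.
	 */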
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002388 cq->qplib_cq.max_wqe = entries;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002389 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2390 cq->qplib_cq.nq = nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002391
2392 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2393 if (rc) {
2394 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2395 goto fail;
2396 }
2397
2398 cq->ib_cq.cqe = entries;
2399 cq->cq_period = cq->qplib_cq.period;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002400 nq->budget++;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002401
2402 atomic_inc(&rdev->cq_count);
2403
2404 if (context) {
2405 struct bnxt_re_cq_resp resp;
2406
2407 resp.cqid = cq->qplib_cq.id;
2408 resp.tail = cq->qplib_cq.hwq.cons;
2409 resp.phase = cq->qplib_cq.period;
2410 resp.rsvd = 0;
2411 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2412 if (rc) {
2413 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2414 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2415 goto c2fail;
2416 }
2417 }
2418
2419 return &cq->ib_cq;
2420
2421c2fail:
2422 if (context)
2423 ib_umem_release(cq->umem);
2424fail:
2425 kfree(cq->cql);
2426 kfree(cq);
2427 return ERR_PTR(rc);
2428}
2429
2430static u8 __req_to_ib_wc_status(u8 qstatus)
2431{
2432 switch (qstatus) {
2433 case CQ_REQ_STATUS_OK:
2434 return IB_WC_SUCCESS;
2435 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2436 return IB_WC_BAD_RESP_ERR;
2437 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2438 return IB_WC_LOC_LEN_ERR;
2439 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2440 return IB_WC_LOC_QP_OP_ERR;
2441 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2442 return IB_WC_LOC_PROT_ERR;
2443 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2444 return IB_WC_GENERAL_ERR;
2445 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2446 return IB_WC_REM_INV_REQ_ERR;
2447 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2448 return IB_WC_REM_ACCESS_ERR;
2449 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2450 return IB_WC_REM_OP_ERR;
2451 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2452 return IB_WC_RNR_RETRY_EXC_ERR;
2453 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2454 return IB_WC_RETRY_EXC_ERR;
2455 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2456 return IB_WC_WR_FLUSH_ERR;
2457 default:
2458 return IB_WC_GENERAL_ERR;
2459 }
2460 return 0;
2461}
2462
2463static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2464{
2465 switch (qstatus) {
2466 case CQ_RES_RAWETH_QP1_STATUS_OK:
2467 return IB_WC_SUCCESS;
2468 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2469 return IB_WC_LOC_ACCESS_ERR;
2470 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2471 return IB_WC_LOC_LEN_ERR;
2472 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2473 return IB_WC_LOC_PROT_ERR;
2474 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2475 return IB_WC_LOC_QP_OP_ERR;
2476 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2477 return IB_WC_GENERAL_ERR;
2478 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2479 return IB_WC_WR_FLUSH_ERR;
2480 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2481 return IB_WC_WR_FLUSH_ERR;
2482 default:
2483 return IB_WC_GENERAL_ERR;
2484 }
2485}
2486
2487static u8 __rc_to_ib_wc_status(u8 qstatus)
2488{
2489 switch (qstatus) {
2490 case CQ_RES_RC_STATUS_OK:
2491 return IB_WC_SUCCESS;
2492 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2493 return IB_WC_LOC_ACCESS_ERR;
2494 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2495 return IB_WC_LOC_LEN_ERR;
2496 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2497 return IB_WC_LOC_PROT_ERR;
2498 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2499 return IB_WC_LOC_QP_OP_ERR;
2500 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2501 return IB_WC_GENERAL_ERR;
2502 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2503 return IB_WC_REM_INV_REQ_ERR;
2504 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2505 return IB_WC_WR_FLUSH_ERR;
2506 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2507 return IB_WC_WR_FLUSH_ERR;
2508 default:
2509 return IB_WC_GENERAL_ERR;
2510 }
2511}
2512
2513static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2514{
2515 switch (cqe->type) {
2516 case BNXT_QPLIB_SWQE_TYPE_SEND:
2517 wc->opcode = IB_WC_SEND;
2518 break;
2519 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2520 wc->opcode = IB_WC_SEND;
2521 wc->wc_flags |= IB_WC_WITH_IMM;
2522 break;
2523 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2524 wc->opcode = IB_WC_SEND;
2525 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2526 break;
2527 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2528 wc->opcode = IB_WC_RDMA_WRITE;
2529 break;
2530 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2531 wc->opcode = IB_WC_RDMA_WRITE;
2532 wc->wc_flags |= IB_WC_WITH_IMM;
2533 break;
2534 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2535 wc->opcode = IB_WC_RDMA_READ;
2536 break;
2537 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2538 wc->opcode = IB_WC_COMP_SWAP;
2539 break;
2540 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2541 wc->opcode = IB_WC_FETCH_ADD;
2542 break;
2543 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2544 wc->opcode = IB_WC_LOCAL_INV;
2545 break;
2546 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2547 wc->opcode = IB_WC_REG_MR;
2548 break;
2549 default:
2550 wc->opcode = IB_WC_SEND;
2551 break;
2552 }
2553
2554 wc->status = __req_to_ib_wc_status(cqe->status);
2555}
2556
2557static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2558 u16 raweth_qp1_flags2)
2559{
2560 bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
2561
2562 /* raweth_qp1_flags Bit 9-6 indicates itype */
2563 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2564 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2565 return -1;
2566
2567 if (raweth_qp1_flags2 &
2568 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2569 raweth_qp1_flags2 &
2570 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2571 is_udp = true;
2572 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
2573 (raweth_qp1_flags2 &
2574 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2575 (is_ipv6 = true) : (is_ipv4 = true);
2576 return ((is_ipv6) ?
2577 BNXT_RE_ROCEV2_IPV6_PACKET :
2578 BNXT_RE_ROCEV2_IPV4_PACKET);
2579 } else {
2580 return BNXT_RE_ROCE_V1_PACKET;
2581 }
2582}
2583
2584static int bnxt_re_to_ib_nw_type(int nw_type)
2585{
2586 u8 nw_hdr_type = 0xFF;
2587
2588 switch (nw_type) {
2589 case BNXT_RE_ROCE_V1_PACKET:
2590 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2591 break;
2592 case BNXT_RE_ROCEV2_IPV4_PACKET:
2593 nw_hdr_type = RDMA_NETWORK_IPV4;
2594 break;
2595 case BNXT_RE_ROCEV2_IPV6_PACKET:
2596 nw_hdr_type = RDMA_NETWORK_IPV6;
2597 break;
2598 }
2599 return nw_hdr_type;
2600}
2601
2602static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2603 void *rq_hdr_buf)
2604{
2605 u8 *tmp_buf = NULL;
2606 struct ethhdr *eth_hdr;
2607 u16 eth_type;
2608 bool rc = false;
2609
2610 tmp_buf = (u8 *)rq_hdr_buf;
2611 /*
 2612 	 * If the dest mac is not the same as the I/F mac, this could be a
 2613 	 * loopback or multicast address; check whether it is actually a
 2614 	 * loopback packet
2615 */
2616 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2617 tmp_buf += 4;
2618 /* Check the ether type */
2619 eth_hdr = (struct ethhdr *)tmp_buf;
2620 eth_type = ntohs(eth_hdr->h_proto);
2621 switch (eth_type) {
2622 case ETH_P_IBOE:
2623 rc = true;
2624 break;
2625 case ETH_P_IP:
2626 case ETH_P_IPV6: {
2627 u32 len;
2628 struct udphdr *udp_hdr;
2629
2630 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2631 sizeof(struct ipv6hdr));
2632 tmp_buf += sizeof(struct ethhdr) + len;
2633 udp_hdr = (struct udphdr *)tmp_buf;
2634 if (ntohs(udp_hdr->dest) ==
2635 ROCE_V2_UDP_DPORT)
2636 rc = true;
2637 break;
2638 }
2639 default:
2640 break;
2641 }
2642 }
2643
2644 return rc;
2645}
2646
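/*
 * A QP1 receive completion is not reported to the ULP directly. The packet
 * is instead looped back to the shadow QP: an Rx buffer is posted on the
 * shadow QP and the GRH/IP portion plus payload of the original packet
 * (minus the Ethernet/transport headers) is sent to it as a UD send, so the
 * shadow QP's completion delivers the GRH + MAD the consumer expects.
 */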
2647static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2648 struct bnxt_qplib_cqe *cqe)
2649{
2650 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2651 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2652 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2653 struct ib_send_wr *swr;
2654 struct ib_ud_wr udwr;
2655 struct ib_recv_wr rwr;
2656 int pkt_type = 0;
2657 u32 tbl_idx;
2658 void *rq_hdr_buf;
2659 dma_addr_t rq_hdr_buf_map;
2660 dma_addr_t shrq_hdr_buf_map;
2661 u32 offset = 0;
2662 u32 skip_bytes = 0;
2663 struct ib_sge s_sge[2];
2664 struct ib_sge r_sge[2];
2665 int rc;
2666
2667 memset(&udwr, 0, sizeof(udwr));
2668 memset(&rwr, 0, sizeof(rwr));
2669 memset(&s_sge, 0, sizeof(s_sge));
2670 memset(&r_sge, 0, sizeof(r_sge));
2671
2672 swr = &udwr.wr;
2673 tbl_idx = cqe->wr_id;
2674
2675 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2676 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2677 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2678 tbl_idx);
2679
2680 /* Shadow QP header buffer */
2681 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2682 tbl_idx);
2683 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2684
2685 /* Store this cqe */
2686 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2687 sqp_entry->qp1_qp = qp1_qp;
2688
2689 /* Find packet type from the cqe */
2690
2691 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2692 cqe->raweth_qp1_flags2);
2693 if (pkt_type < 0) {
2694 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2695 return -EINVAL;
2696 }
2697
2698 /* Adjust the offset for the user buffer and post in the rq */
2699
2700 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2701 offset = 20;
2702
2703 /*
2704 * QP1 loopback packet has 4 bytes of internal header before
2705 * ether header. Skip these four bytes.
2706 */
2707 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2708 skip_bytes = 4;
2709
 2710 	/* First send SGE. Skip the ether header */
2711 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2712 + skip_bytes;
2713 s_sge[0].lkey = 0xFFFFFFFF;
2714 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2715 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2716
2717 /* Second Send SGE */
2718 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2719 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2720 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2721 s_sge[1].addr += 8;
2722 s_sge[1].lkey = 0xFFFFFFFF;
2723 s_sge[1].length = 256;
2724
2725 /* First recv SGE */
2726
2727 r_sge[0].addr = shrq_hdr_buf_map;
2728 r_sge[0].lkey = 0xFFFFFFFF;
2729 r_sge[0].length = 40;
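	/* 40 bytes == sizeof(struct ib_grh); the shadow RQ entry receives the
	 * GRH first, as the MAD layer expects.
	 */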
2730
2731 r_sge[1].addr = sqp_entry->sge.addr + offset;
2732 r_sge[1].lkey = sqp_entry->sge.lkey;
2733 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2734
2735 /* Create receive work request */
2736 rwr.num_sge = 2;
2737 rwr.sg_list = r_sge;
2738 rwr.wr_id = tbl_idx;
2739 rwr.next = NULL;
2740
2741 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2742 if (rc) {
2743 dev_err(rdev_to_dev(rdev),
2744 "Failed to post Rx buffers to shadow QP");
2745 return -ENOMEM;
2746 }
2747
2748 swr->num_sge = 2;
2749 swr->sg_list = s_sge;
2750 swr->wr_id = tbl_idx;
2751 swr->opcode = IB_WR_SEND;
2752 swr->next = NULL;
2753
2754 udwr.ah = &rdev->sqp_ah->ib_ah;
2755 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2756 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2757
2758 /* post data received in the send queue */
2759 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2760
2761 return 0;
2762}
2763
2764static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2765 struct bnxt_qplib_cqe *cqe)
2766{
2767 wc->opcode = IB_WC_RECV;
2768 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2769 wc->wc_flags |= IB_WC_GRH;
2770}
2771
2772static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2773 struct bnxt_qplib_cqe *cqe)
2774{
2775 wc->opcode = IB_WC_RECV;
2776 wc->status = __rc_to_ib_wc_status(cqe->status);
2777
2778 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2779 wc->wc_flags |= IB_WC_WITH_IMM;
2780 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2781 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2782 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2783 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2784 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2785}
2786
2787static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2788 struct ib_wc *wc,
2789 struct bnxt_qplib_cqe *cqe)
2790{
2791 u32 tbl_idx;
2792 struct bnxt_re_dev *rdev = qp->rdev;
2793 struct bnxt_re_qp *qp1_qp = NULL;
2794 struct bnxt_qplib_cqe *orig_cqe = NULL;
2795 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2796 int nw_type;
2797
2798 tbl_idx = cqe->wr_id;
2799
2800 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2801 qp1_qp = sqp_entry->qp1_qp;
2802 orig_cqe = &sqp_entry->cqe;
2803
2804 wc->wr_id = sqp_entry->wrid;
2805 wc->byte_len = orig_cqe->length;
2806 wc->qp = &qp1_qp->ib_qp;
2807
2808 wc->ex.imm_data = orig_cqe->immdata;
2809 wc->src_qp = orig_cqe->src_qp;
2810 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2811 wc->port_num = 1;
2812 wc->vendor_err = orig_cqe->status;
2813
2814 wc->opcode = IB_WC_RECV;
2815 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2816 wc->wc_flags |= IB_WC_GRH;
2817
2818 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2819 orig_cqe->raweth_qp1_flags2);
2820 if (nw_type >= 0) {
2821 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2822 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2823 }
2824}
2825
2826static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2827 struct bnxt_qplib_cqe *cqe)
2828{
2829 wc->opcode = IB_WC_RECV;
2830 wc->status = __rc_to_ib_wc_status(cqe->status);
2831
2832 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2833 wc->wc_flags |= IB_WC_WITH_IMM;
2834 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2835 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2836 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2837 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2838 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2839}
2840
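/*
 * Called from bnxt_re_poll_cq() when the qplib layer flags sq->send_phantom:
 * post a fence-MW bind WQE on the SQ (a "phantom" WQE that carries no user
 * data) and account for it in phantom_wqe_cnt.
 */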
Eddie Wai9152e0b2017-06-14 03:26:23 -07002841static int send_phantom_wqe(struct bnxt_re_qp *qp)
2842{
2843 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2844 unsigned long flags;
2845 int rc = 0;
2846
2847 spin_lock_irqsave(&qp->sq_lock, flags);
2848
2849 rc = bnxt_re_bind_fence_mw(lib_qp);
2850 if (!rc) {
2851 lib_qp->sq.phantom_wqe_cnt++;
2852 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2853 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2854 lib_qp->id, lib_qp->sq.hwq.prod,
2855 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2856 lib_qp->sq.phantom_wqe_cnt);
2857 }
2858
2859 spin_unlock_irqrestore(&qp->sq_lock, flags);
2860 return rc;
2861}
2862
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002863int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2864{
2865 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2866 struct bnxt_re_qp *qp;
2867 struct bnxt_qplib_cqe *cqe;
2868 int i, ncqe, budget;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002869 struct bnxt_qplib_q *sq;
2870 struct bnxt_qplib_qp *lib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002871 u32 tbl_idx;
2872 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2873 unsigned long flags;
2874
2875 spin_lock_irqsave(&cq->cq_lock, flags);
2876 budget = min_t(u32, num_entries, cq->max_cql);
Devesh Sharma10d1ded2017-06-29 12:28:17 -07002877 num_entries = budget;
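	/* The per-call budget is capped at cq->max_cql, the size of the
	 * cq->cql scratch array, so at most max_cql CQEs are returned per poll.
	 */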
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002878 if (!cq->cql) {
2879 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2880 goto exit;
2881 }
2882 cqe = &cq->cql[0];
2883 while (budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002884 lib_qp = NULL;
2885 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2886 if (lib_qp) {
2887 sq = &lib_qp->sq;
2888 if (sq->send_phantom) {
2889 qp = container_of(lib_qp,
2890 struct bnxt_re_qp, qplib_qp);
2891 if (send_phantom_wqe(qp) == -ENOMEM)
2892 dev_err(rdev_to_dev(cq->rdev),
2893 "Phantom failed! Scheduled to send again\n");
2894 else
2895 sq->send_phantom = false;
2896 }
2897 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002898 if (ncqe < budget)
2899 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
2900 cqe + ncqe,
2901 budget - ncqe);
Eddie Wai9152e0b2017-06-14 03:26:23 -07002902
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002903 if (!ncqe)
2904 break;
2905
2906 for (i = 0; i < ncqe; i++, cqe++) {
2907 /* Transcribe each qplib_wqe back to ib_wc */
2908 memset(wc, 0, sizeof(*wc));
2909
2910 wc->wr_id = cqe->wr_id;
2911 wc->byte_len = cqe->length;
2912 qp = container_of
2913 ((struct bnxt_qplib_qp *)
2914 (unsigned long)(cqe->qp_handle),
2915 struct bnxt_re_qp, qplib_qp);
2916 if (!qp) {
2917 dev_err(rdev_to_dev(cq->rdev),
2918 "POLL CQ : bad QP handle");
2919 continue;
2920 }
2921 wc->qp = &qp->ib_qp;
2922 wc->ex.imm_data = cqe->immdata;
2923 wc->src_qp = cqe->src_qp;
2924 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2925 wc->port_num = 1;
2926 wc->vendor_err = cqe->status;
2927
2928 switch (cqe->opcode) {
2929 case CQ_BASE_CQE_TYPE_REQ:
2930 if (qp->qplib_qp.id ==
2931 qp->rdev->qp1_sqp->qplib_qp.id) {
2932 /* Handle this completion with
2933 * the stored completion
2934 */
2935 memset(wc, 0, sizeof(*wc));
2936 continue;
2937 }
2938 bnxt_re_process_req_wc(wc, cqe);
2939 break;
2940 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2941 if (!cqe->status) {
2942 int rc = 0;
2943
2944 rc = bnxt_re_process_raw_qp_pkt_rx
2945 (qp, cqe);
2946 if (!rc) {
2947 memset(wc, 0, sizeof(*wc));
2948 continue;
2949 }
2950 cqe->status = -1;
2951 }
2952 /* Errors need not be looped back.
2953 * But change the wr_id to the one
2954 * stored in the table
2955 */
2956 tbl_idx = cqe->wr_id;
2957 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2958 wc->wr_id = sqp_entry->wrid;
2959 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2960 break;
2961 case CQ_BASE_CQE_TYPE_RES_RC:
2962 bnxt_re_process_res_rc_wc(wc, cqe);
2963 break;
2964 case CQ_BASE_CQE_TYPE_RES_UD:
2965 if (qp->qplib_qp.id ==
2966 qp->rdev->qp1_sqp->qplib_qp.id) {
2967 /* Handle this completion with
2968 * the stored completion
2969 */
2970 if (cqe->status) {
2971 continue;
2972 } else {
2973 bnxt_re_process_res_shadow_qp_wc
2974 (qp, wc, cqe);
2975 break;
2976 }
2977 }
2978 bnxt_re_process_res_ud_wc(wc, cqe);
2979 break;
2980 default:
2981 dev_err(rdev_to_dev(cq->rdev),
2982 "POLL CQ : type 0x%x not handled",
2983 cqe->opcode);
2984 continue;
2985 }
2986 wc++;
2987 budget--;
2988 }
2989 }
2990exit:
2991 spin_unlock_irqrestore(&cq->cq_lock, flags);
2992 return num_entries - budget;
2993}
2994
2995int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
2996 enum ib_cq_notify_flags ib_cqn_flags)
2997{
2998 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2999 int type = 0;
3000
3001 /* Trigger on the very next completion */
3002 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3003 type = DBR_DBR_TYPE_CQ_ARMALL;
3004 /* Trigger on the next solicited completion */
3005 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3006 type = DBR_DBR_TYPE_CQ_ARMSE;
3007
Selvin Xavier499e4562017-06-29 12:28:18 -07003008 /* Poll to see if there are missed events */
3009 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3010 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
3011 return 1;
3012
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003013 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3014
3015 return 0;
3016}
3017
3018/* Memory Regions */
3019struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3020{
3021 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3022 struct bnxt_re_dev *rdev = pd->rdev;
3023 struct bnxt_re_mr *mr;
3024 u64 pbl = 0;
3025 int rc;
3026
3027 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3028 if (!mr)
3029 return ERR_PTR(-ENOMEM);
3030
3031 mr->rdev = rdev;
3032 mr->qplib_mr.pd = &pd->qplib_pd;
3033 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3034 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3035
3036 /* Allocate and register 0 as the address */
3037 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3038 if (rc)
3039 goto fail;
3040
3041 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
 3042 	mr->qplib_mr.total_size = -1; /* Infinite length */
3043 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
3044 if (rc)
3045 goto fail_mr;
3046
3047 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3048 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3049 IB_ACCESS_REMOTE_ATOMIC))
3050 mr->ib_mr.rkey = mr->ib_mr.lkey;
3051 atomic_inc(&rdev->mr_count);
3052
3053 return &mr->ib_mr;
3054
3055fail_mr:
3056 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3057fail:
3058 kfree(mr);
3059 return ERR_PTR(rc);
3060}
3061
3062int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3063{
3064 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3065 struct bnxt_re_dev *rdev = mr->rdev;
Colin Ian Kingebbd1df2017-02-17 15:35:22 +00003066 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003067
Selvin Xavier1c980b02017-05-22 03:15:34 -07003068 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3069 if (rc) {
3070 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3071 return rc;
3072 }
3073
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003074 if (mr->npages && mr->pages) {
3075 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3076 &mr->qplib_frpl);
3077 kfree(mr->pages);
3078 mr->npages = 0;
3079 mr->pages = NULL;
3080 }
Doug Ledford374cb862017-04-25 14:00:59 -04003081 if (!IS_ERR_OR_NULL(mr->ib_umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003082 ib_umem_release(mr->ib_umem);
3083
3084 kfree(mr);
3085 atomic_dec(&rdev->mr_count);
3086 return rc;
3087}
3088
3089static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3090{
3091 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3092
3093 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3094 return -ENOMEM;
3095
3096 mr->pages[mr->npages++] = addr;
3097 return 0;
3098}
3099
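/*
 * ib_sg_to_pages() walks the scatterlist and invokes bnxt_re_set_page() once
 * per mr->page_size chunk, filling mr->pages[] for use by the REG_MR WQE
 * built in bnxt_re_build_reg_wqe().
 */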
3100int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3101 unsigned int *sg_offset)
3102{
3103 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3104
3105 mr->npages = 0;
3106 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3107}
3108
3109struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3110 u32 max_num_sg)
3111{
3112 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3113 struct bnxt_re_dev *rdev = pd->rdev;
3114 struct bnxt_re_mr *mr = NULL;
3115 int rc;
3116
3117 if (type != IB_MR_TYPE_MEM_REG) {
3118 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3119 return ERR_PTR(-EINVAL);
3120 }
3121 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3122 return ERR_PTR(-EINVAL);
3123
3124 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3125 if (!mr)
3126 return ERR_PTR(-ENOMEM);
3127
3128 mr->rdev = rdev;
3129 mr->qplib_mr.pd = &pd->qplib_pd;
3130 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3131 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3132
3133 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3134 if (rc)
3135 goto fail;
3136
3137 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3138 mr->ib_mr.rkey = mr->ib_mr.lkey;
3139
3140 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3141 if (!mr->pages) {
3142 rc = -ENOMEM;
3143 goto fail;
3144 }
3145 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3146 &mr->qplib_frpl, max_num_sg);
3147 if (rc) {
3148 dev_err(rdev_to_dev(rdev),
3149 "Failed to allocate HW FR page list");
3150 goto fail_mr;
3151 }
3152
3153 atomic_inc(&rdev->mr_count);
3154 return &mr->ib_mr;
3155
3156fail_mr:
3157 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3158fail:
3159 kfree(mr->pages);
3160 kfree(mr);
3161 return ERR_PTR(rc);
3162}
3163
Eddie Wai9152e0b2017-06-14 03:26:23 -07003164struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3165 struct ib_udata *udata)
3166{
3167 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3168 struct bnxt_re_dev *rdev = pd->rdev;
3169 struct bnxt_re_mw *mw;
3170 int rc;
3171
3172 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3173 if (!mw)
3174 return ERR_PTR(-ENOMEM);
3175 mw->rdev = rdev;
3176 mw->qplib_mw.pd = &pd->qplib_pd;
3177
3178 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3179 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3180 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3181 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3182 if (rc) {
3183 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3184 goto fail;
3185 }
3186 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3187
3188 atomic_inc(&rdev->mw_count);
3189 return &mw->ib_mw;
3190
3191fail:
3192 kfree(mw);
3193 return ERR_PTR(rc);
3194}
3195
3196int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3197{
3198 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3199 struct bnxt_re_dev *rdev = mw->rdev;
3200 int rc;
3201
3202 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3203 if (rc) {
3204 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3205 return rc;
3206 }
3207
3208 kfree(mw);
3209 atomic_dec(&rdev->mw_count);
3210 return rc;
3211}
3212
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003213/* uverbs */
3214struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3215 u64 virt_addr, int mr_access_flags,
3216 struct ib_udata *udata)
3217{
3218 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3219 struct bnxt_re_dev *rdev = pd->rdev;
3220 struct bnxt_re_mr *mr;
3221 struct ib_umem *umem;
3222 u64 *pbl_tbl, *pbl_tbl_orig;
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003223 int i, umem_pgs, pages, rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003224 struct scatterlist *sg;
3225 int entry;
3226
Selvin Xavier58d4a672017-06-29 12:28:12 -07003227 if (length > BNXT_RE_MAX_MR_SIZE) {
3228 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
3229 length, BNXT_RE_MAX_MR_SIZE);
3230 return ERR_PTR(-ENOMEM);
3231 }
3232
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003233 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3234 if (!mr)
3235 return ERR_PTR(-ENOMEM);
3236
3237 mr->rdev = rdev;
3238 mr->qplib_mr.pd = &pd->qplib_pd;
3239 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3240 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3241
3242 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3243 mr_access_flags, 0);
3244 if (IS_ERR(umem)) {
3245 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3246 rc = -EFAULT;
3247 goto free_mr;
3248 }
3249 mr->ib_umem = umem;
3250
3251 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3252 if (rc) {
3253 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3254 goto release_umem;
3255 }
3256 /* The fixed portion of the rkey is the same as the lkey */
3257 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3258
3259 mr->qplib_mr.va = virt_addr;
3260 umem_pgs = ib_umem_page_count(umem);
3261 if (!umem_pgs) {
3262 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3263 rc = -EINVAL;
3264 goto free_mrw;
3265 }
3266 mr->qplib_mr.total_size = length;
3267
3268 pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
3269 if (!pbl_tbl) {
3270 rc = -EINVAL;
3271 goto free_mrw;
3272 }
3273 pbl_tbl_orig = pbl_tbl;
3274
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003275 if (umem->hugetlb) {
3276 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3277 rc = -EFAULT;
3278 goto fail;
3279 }
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003280
3281 if (umem->page_shift != PAGE_SHIFT) {
3282 dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003283 rc = -EFAULT;
3284 goto fail;
3285 }
3286 /* Map umem buf ptrs to the PBL */
3287 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003288 pages = sg_dma_len(sg) >> umem->page_shift;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003289 for (i = 0; i < pages; i++, pbl_tbl++)
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003290 *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003291 }
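	/* e.g. a 16K user region backed by 4K pages yields four PBL entries,
	 * each holding the DMA address of one page.
	 */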
3292 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3293 umem_pgs, false);
3294 if (rc) {
3295 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3296 goto fail;
3297 }
3298
3299 kfree(pbl_tbl_orig);
3300
3301 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3302 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3303 atomic_inc(&rdev->mr_count);
3304
3305 return &mr->ib_mr;
3306fail:
3307 kfree(pbl_tbl_orig);
3308free_mrw:
3309 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3310release_umem:
3311 ib_umem_release(umem);
3312free_mr:
3313 kfree(mr);
3314 return ERR_PTR(rc);
3315}
3316
3317struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3318 struct ib_udata *udata)
3319{
3320 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3321 struct bnxt_re_uctx_resp resp;
3322 struct bnxt_re_ucontext *uctx;
3323 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3324 int rc;
3325
3326 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3327 ibdev->uverbs_abi_ver);
3328
3329 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
 3330 		dev_dbg(rdev_to_dev(rdev),
 3331 			"requested ABI is different from the device's %d",
 			BNXT_RE_ABI_VERSION);
3332 return ERR_PTR(-EPERM);
3333 }
3334
3335 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3336 if (!uctx)
3337 return ERR_PTR(-ENOMEM);
3338
3339 uctx->rdev = rdev;
3340
3341 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3342 if (!uctx->shpg) {
3343 rc = -ENOMEM;
3344 goto fail;
3345 }
3346 spin_lock_init(&uctx->sh_lock);
3347
 3348 	resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3349 resp.max_qp = rdev->qplib_ctx.qpc_count;
3350 resp.pg_size = PAGE_SIZE;
3351 resp.cqe_sz = sizeof(struct cq_base);
3352 resp.max_cqd = dev_attr->max_cq_wqes;
3353 resp.rsvd = 0;
3354
3355 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3356 if (rc) {
3357 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3358 rc = -EFAULT;
3359 goto cfail;
3360 }
3361
3362 return &uctx->ib_uctx;
3363cfail:
3364 free_page((unsigned long)uctx->shpg);
3365 uctx->shpg = NULL;
3366fail:
3367 kfree(uctx);
3368 return ERR_PTR(rc);
3369}
3370
3371int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3372{
3373 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3374 struct bnxt_re_ucontext,
3375 ib_uctx);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003376
3377 struct bnxt_re_dev *rdev = uctx->rdev;
3378 int rc = 0;
3379
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003380 if (uctx->shpg)
3381 free_page((unsigned long)uctx->shpg);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003382
3383 if (uctx->dpi.dbr) {
3384 /* Free DPI only if this is the first PD allocated by the
3385 * application and mark the context dpi as NULL
3386 */
3387 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3388 &rdev->qplib_res.dpi_tbl,
3389 &uctx->dpi);
3390 if (rc)
Colin Ian King24bb4d82017-07-14 08:30:10 +01003391 dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003392 /* Don't fail, continue*/
3393 uctx->dpi.dbr = NULL;
3394 }
3395
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003396 kfree(uctx);
3397 return 0;
3398}
3399
3400/* Helper function to mmap the virtual memory from user app */
3401int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3402{
3403 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3404 struct bnxt_re_ucontext,
3405 ib_uctx);
3406 struct bnxt_re_dev *rdev = uctx->rdev;
3407 u64 pfn;
3408
3409 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3410 return -EINVAL;
3411
3412 if (vma->vm_pgoff) {
3413 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3414 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3415 PAGE_SIZE, vma->vm_page_prot)) {
3416 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3417 return -EAGAIN;
3418 }
3419 } else {
3420 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3421 if (remap_pfn_range(vma, vma->vm_start,
3422 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3423 dev_err(rdev_to_dev(rdev),
3424 "Failed to map shared page");
3425 return -EAGAIN;
3426 }
3427 }
3428
3429 return 0;
3430}