      1/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39#include <linux/interrupt.h>
40#include <linux/types.h>
41#include <linux/pci.h>
42#include <linux/netdevice.h>
43#include <linux/if_ether.h>
44
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_user_verbs.h>
47#include <rdma/ib_umem.h>
48#include <rdma/ib_addr.h>
49#include <rdma/ib_mad.h>
50#include <rdma/ib_cache.h>
51
52#include "bnxt_ulp.h"
53
54#include "roce_hsi.h"
55#include "qplib_res.h"
56#include "qplib_sp.h"
57#include "qplib_fp.h"
58#include "qplib_rcfw.h"
59
60#include "bnxt_re.h"
61#include "ib_verbs.h"
62#include <rdma/bnxt_re-abi.h>
63
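/*
 * Map IB verbs access flags onto the BNXT_QPLIB_ACCESS_* bits consumed by
 * the qplib layer; __to_ib_access_flags() below performs the reverse mapping.
 */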
Eddie Wai9152e0b2017-06-14 03:26:23 -070064static int __from_ib_access_flags(int iflags)
65{
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
83};
84
85static enum ib_access_flags __to_ib_access_flags(int qflags)
86{
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
104};
105
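/*
 * Copy an ib_sge array into the qplib SGE format and return the total
 * payload length in bytes.
 */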
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800106static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 struct bnxt_qplib_sge *sg_list, int num)
108{
109 int i, total = 0;
110
111 for (i = 0; i < num; i++) {
112 sg_list[i].addr = ib_sg_list[i].addr;
113 sg_list[i].lkey = ib_sg_list[i].lkey;
114 sg_list[i].size = ib_sg_list[i].length;
115 total += sg_list[i].size;
116 }
117 return total;
118}
119
120/* Device */
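/*
 * Return the netdev backing this IB device; a reference is taken with
 * dev_hold() and is expected to be dropped by the caller.
 */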
121struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122{
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
125
126 rcu_read_lock();
127 if (rdev)
128 netdev = rdev->netdev;
129 if (netdev)
130 dev_hold(netdev);
131
132 rcu_read_unlock();
133 return netdev;
134}
135
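/*
 * Report device capabilities to the IB core using the limits cached in
 * rdev->dev_attr.
 */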
136int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
139{
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142
143 memset(ib_attr, 0, sizeof(*ib_attr));
144
145 ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
146 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
147 (u8 *)&ib_attr->sys_image_guid);
Selvin Xavier58d4a672017-06-29 12:28:12 -0700148 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
149 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800150
151 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
152 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
153 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
154 ib_attr->max_qp = dev_attr->max_qp;
155 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
156 ib_attr->device_cap_flags =
157 IB_DEVICE_CURR_QP_STATE_MOD
158 | IB_DEVICE_RC_RNR_NAK_GEN
159 | IB_DEVICE_SHUTDOWN_PORT
160 | IB_DEVICE_SYS_IMAGE_GUID
161 | IB_DEVICE_LOCAL_DMA_LKEY
162 | IB_DEVICE_RESIZE_MAX_WR
163 | IB_DEVICE_PORT_ACTIVE_EVENT
164 | IB_DEVICE_N_NOTIFY_CQ
165 | IB_DEVICE_MEM_WINDOW
166 | IB_DEVICE_MEM_WINDOW_TYPE_2B
167 | IB_DEVICE_MEM_MGT_EXTENSIONS;
168 ib_attr->max_sge = dev_attr->max_qp_sges;
169 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
170 ib_attr->max_cq = dev_attr->max_cq;
171 ib_attr->max_cqe = dev_attr->max_cq_wqes;
172 ib_attr->max_mr = dev_attr->max_mr;
173 ib_attr->max_pd = dev_attr->max_pd;
174 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
Eddie Waia25d1122017-06-29 12:28:13 -0700175 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
Devesh Sharma254cd252017-06-29 12:28:16 -0700176 if (dev_attr->is_atomic) {
177 ib_attr->atomic_cap = IB_ATOMIC_HCA;
178 ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
179 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800180
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
184 ib_attr->max_ee = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
193
Selvin Xavier86816a02017-05-22 03:15:44 -0700194 ib_attr->max_fmr = 0;
195 ib_attr->max_map_per_fmr = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800196
197 ib_attr->max_srq = dev_attr->max_srq;
198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
200
201 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
202
203 ib_attr->max_pkeys = 1;
Selvin Xavier601577b2017-06-29 12:28:19 -0700204 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800205 return 0;
206}
207
208int bnxt_re_modify_device(struct ib_device *ibdev,
209 int device_modify_mask,
210 struct ib_device_modify *device_modify)
211{
212 switch (device_modify_mask) {
213 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
    214		/* Modifying the GUID requires modifying the GID table; */
    215		/* the GUID should therefore be treated as read-only. */
216 break;
217 case IB_DEVICE_MODIFY_NODE_DESC:
    218		/* The Node Desc should likewise be treated as read-only. */
219 break;
220 default:
221 break;
222 }
223 return 0;
224}
225
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800226/* Port */
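/*
 * Report port attributes: link state follows the netdev carrier, the MTU is
 * derived from the netdev MTU, and speed/width come from ib_get_eth_speed().
 */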
227int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
228 struct ib_port_attr *port_attr)
229{
230 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
231 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
232
233 memset(port_attr, 0, sizeof(*port_attr));
234
235 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
236 port_attr->state = IB_PORT_ACTIVE;
237 port_attr->phys_state = 5;
238 } else {
239 port_attr->state = IB_PORT_DOWN;
240 port_attr->phys_state = 3;
241 }
242 port_attr->max_mtu = IB_MTU_4096;
243 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
244 port_attr->gid_tbl_len = dev_attr->max_sgid;
245 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
246 IB_PORT_DEVICE_MGMT_SUP |
247 IB_PORT_VENDOR_CLASS_SUP |
248 IB_PORT_IP_BASED_GIDS;
249
250 /* Max MSG size set to 2G for now */
251 port_attr->max_msg_sz = 0x80000000;
252 port_attr->bad_pkey_cntr = 0;
253 port_attr->qkey_viol_cntr = 0;
254 port_attr->pkey_tbl_len = dev_attr->max_pkey;
255 port_attr->lid = 0;
256 port_attr->sm_lid = 0;
257 port_attr->lmc = 0;
258 port_attr->max_vl_num = 4;
259 port_attr->sm_sl = 0;
260 port_attr->subnet_timeout = 0;
261 port_attr->init_type_reply = 0;
    262	/* Query speed settings through the underlying netdev's ethtool hooks.
    263	 * rtnl_lock is acquired _only_ if the device is registered with the
    264	 * IB stack, to avoid a race in the NETDEV_UNREG path.
265 */
266 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
    267		if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
268 &port_attr->active_width))
269 return -EINVAL;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800270 return 0;
271}
272
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800273int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
274 struct ib_port_immutable *immutable)
275{
276 struct ib_port_attr port_attr;
277
278 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
279 return -EINVAL;
280
281 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
282 immutable->gid_tbl_len = port_attr.gid_tbl_len;
283 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
284 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
285 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
286 return 0;
287}
288
289int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
290 u16 index, u16 *pkey)
291{
292 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
293
294 /* Ignore port_num */
295
296 memset(pkey, 0, sizeof(*pkey));
297 return bnxt_qplib_get_pkey(&rdev->qplib_res,
298 &rdev->qplib_res.pkey_tbl, index, pkey);
299}
300
301int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
302 int index, union ib_gid *gid)
303{
304 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
305 int rc = 0;
306
307 /* Ignore port_num */
308 memset(gid, 0, sizeof(*gid));
309 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
310 &rdev->qplib_res.sgid_tbl, index,
311 (struct bnxt_qplib_gid *)gid);
312 return rc;
313}
314
315int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
316 unsigned int index, void **context)
317{
318 int rc = 0;
319 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
320 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
321 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
322
323 /* Delete the entry from the hardware */
324 ctx = *context;
325 if (!ctx)
326 return -EINVAL;
327
328 if (sgid_tbl && sgid_tbl->active) {
329 if (ctx->idx >= sgid_tbl->max)
330 return -EINVAL;
331 ctx->refcnt--;
332 if (!ctx->refcnt) {
Selvin Xavier4a62c5e2017-06-29 12:28:11 -0700333 rc = bnxt_qplib_del_sgid(sgid_tbl,
334 &sgid_tbl->tbl[ctx->idx],
335 true);
336 if (rc) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800337 dev_err(rdev_to_dev(rdev),
338 "Failed to remove GID: %#x", rc);
Selvin Xavier4a62c5e2017-06-29 12:28:11 -0700339 } else {
340 ctx_tbl = sgid_tbl->ctx;
341 ctx_tbl[ctx->idx] = NULL;
342 kfree(ctx);
343 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800344 }
345 } else {
346 return -EINVAL;
347 }
348 return rc;
349}
350
351int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
352 unsigned int index, const union ib_gid *gid,
353 const struct ib_gid_attr *attr, void **context)
354{
355 int rc;
356 u32 tbl_idx = 0;
357 u16 vlan_id = 0xFFFF;
358 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
359 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
360 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
361
362 if ((attr->ndev) && is_vlan_dev(attr->ndev))
363 vlan_id = vlan_dev_vlan_id(attr->ndev);
364
365 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
366 rdev->qplib_res.netdev->dev_addr,
367 vlan_id, true, &tbl_idx);
368 if (rc == -EALREADY) {
369 ctx_tbl = sgid_tbl->ctx;
370 ctx_tbl[tbl_idx]->refcnt++;
371 *context = ctx_tbl[tbl_idx];
372 return 0;
373 }
374
375 if (rc < 0) {
376 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
377 return rc;
378 }
379
380 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
381 if (!ctx)
382 return -ENOMEM;
383 ctx_tbl = sgid_tbl->ctx;
384 ctx->idx = tbl_idx;
385 ctx->refcnt = 1;
386 ctx_tbl[tbl_idx] = ctx;
387
388 return rc;
389}
390
391enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
392 u8 port_num)
393{
394 return IB_LINK_LAYER_ETHERNET;
395}
396
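/*
 * Work-request fencing helpers: each kernel PD owns a dedicated fence MR/MW
 * pair, and the bind WQE prepared here is later posted through
 * bnxt_re_bind_fence_mw().
 */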
Eddie Wai9152e0b2017-06-14 03:26:23 -0700397#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
398
399static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
400{
401 struct bnxt_re_fence_data *fence = &pd->fence;
402 struct ib_mr *ib_mr = &fence->mr->ib_mr;
403 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
404
405 memset(wqe, 0, sizeof(*wqe));
406 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
407 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
408 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
409 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
410 wqe->bind.zero_based = false;
411 wqe->bind.parent_l_key = ib_mr->lkey;
412 wqe->bind.va = (u64)(unsigned long)fence->va;
413 wqe->bind.length = fence->size;
414 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
415 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
416
417 /* Save the initial rkey in fence structure for now;
418 * wqe->bind.r_key will be set at (re)bind time.
419 */
420 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
421}
422
423static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
424{
425 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
426 qplib_qp);
427 struct ib_pd *ib_pd = qp->ib_qp.pd;
428 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
429 struct bnxt_re_fence_data *fence = &pd->fence;
430 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
431 struct bnxt_qplib_swqe wqe;
432 int rc;
433
434 memcpy(&wqe, fence_wqe, sizeof(wqe));
435 wqe.bind.r_key = fence->bind_rkey;
436 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
437
438 dev_dbg(rdev_to_dev(qp->rdev),
439 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
440 wqe.bind.r_key, qp->qplib_qp.id, pd);
441 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
442 if (rc) {
443 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
444 return rc;
445 }
446 bnxt_qplib_post_send_db(&qp->qplib_qp);
447
448 return rc;
449}
450
451static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
452{
453 struct bnxt_re_fence_data *fence = &pd->fence;
454 struct bnxt_re_dev *rdev = pd->rdev;
455 struct device *dev = &rdev->en_dev->pdev->dev;
456 struct bnxt_re_mr *mr = fence->mr;
457
458 if (fence->mw) {
459 bnxt_re_dealloc_mw(fence->mw);
460 fence->mw = NULL;
461 }
462 if (mr) {
463 if (mr->ib_mr.rkey)
464 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
465 true);
466 if (mr->ib_mr.lkey)
467 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
468 kfree(mr);
469 fence->mr = NULL;
470 }
471 if (fence->dma_addr) {
472 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
473 DMA_BIDIRECTIONAL);
474 fence->dma_addr = 0;
475 }
476}
477
478static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
479{
480 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
481 struct bnxt_re_fence_data *fence = &pd->fence;
482 struct bnxt_re_dev *rdev = pd->rdev;
483 struct device *dev = &rdev->en_dev->pdev->dev;
484 struct bnxt_re_mr *mr = NULL;
485 dma_addr_t dma_addr = 0;
486 struct ib_mw *mw;
487 u64 pbl_tbl;
488 int rc;
489
490 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
491 DMA_BIDIRECTIONAL);
492 rc = dma_mapping_error(dev, dma_addr);
493 if (rc) {
494 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
495 rc = -EIO;
496 fence->dma_addr = 0;
497 goto fail;
498 }
499 fence->dma_addr = dma_addr;
500
501 /* Allocate a MR */
502 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
503 if (!mr) {
504 rc = -ENOMEM;
505 goto fail;
506 }
507 fence->mr = mr;
508 mr->rdev = rdev;
509 mr->qplib_mr.pd = &pd->qplib_pd;
510 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
511 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
512 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
513 if (rc) {
514 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
515 goto fail;
516 }
517
518 /* Register MR */
519 mr->ib_mr.lkey = mr->qplib_mr.lkey;
520 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
521 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
522 pbl_tbl = dma_addr;
523 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
524 BNXT_RE_FENCE_PBL_SIZE, false);
525 if (rc) {
526 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
527 goto fail;
528 }
529 mr->ib_mr.rkey = mr->qplib_mr.rkey;
530
531 /* Create a fence MW only for kernel consumers */
532 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
Dan Carpenter653f0a72017-07-10 10:22:47 +0300533 if (IS_ERR(mw)) {
Eddie Wai9152e0b2017-06-14 03:26:23 -0700534 dev_err(rdev_to_dev(rdev),
535 "Failed to create fence-MW for PD: %p\n", pd);
Dan Carpenter653f0a72017-07-10 10:22:47 +0300536 rc = PTR_ERR(mw);
Eddie Wai9152e0b2017-06-14 03:26:23 -0700537 goto fail;
538 }
539 fence->mw = mw;
540
541 bnxt_re_create_fence_wqe(pd);
542 return 0;
543
544fail:
545 bnxt_re_destroy_fence_mr(pd);
546 return rc;
547}
548
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800549/* Protection Domains */
550int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
551{
552 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
553 struct bnxt_re_dev *rdev = pd->rdev;
554 int rc;
555
Eddie Wai9152e0b2017-06-14 03:26:23 -0700556 bnxt_re_destroy_fence_mr(pd);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800557
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700558 if (pd->qplib_pd.id) {
559 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
560 &rdev->qplib_res.pd_tbl,
561 &pd->qplib_pd);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800562 if (rc)
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700563 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800564 }
565
566 kfree(pd);
567 return 0;
568}
569
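/*
 * Allocate a protection domain. For user contexts a doorbell page (DPI) is
 * allocated on first use and returned through udata; kernel PDs also get a
 * fence MR for work-request fencing.
 */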
570struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
571 struct ib_ucontext *ucontext,
572 struct ib_udata *udata)
573{
574 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
575 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
576 struct bnxt_re_ucontext,
577 ib_uctx);
578 struct bnxt_re_pd *pd;
579 int rc;
580
581 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
582 if (!pd)
583 return ERR_PTR(-ENOMEM);
584
585 pd->rdev = rdev;
586 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
587 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
588 rc = -ENOMEM;
589 goto fail;
590 }
591
592 if (udata) {
593 struct bnxt_re_pd_resp resp;
594
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700595 if (!ucntx->dpi.dbr) {
    596			/* Allocate the DPI in alloc_pd so that ibv_devinfo and
    597			 * similar applications do not fail once DPIs are
    598			 * depleted.
599 */
600 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700601 &ucntx->dpi, ucntx)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800602 rc = -ENOMEM;
603 goto dbfail;
604 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800605 }
606
607 resp.pdid = pd->qplib_pd.id;
608 /* Still allow mapping this DBR to the new user PD. */
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700609 resp.dpi = ucntx->dpi.dpi;
610 resp.dbr = (u64)ucntx->dpi.umdbr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800611
612 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
613 if (rc) {
614 dev_err(rdev_to_dev(rdev),
615 "Failed to copy user response\n");
616 goto dbfail;
617 }
618 }
619
Eddie Wai9152e0b2017-06-14 03:26:23 -0700620 if (!udata)
621 if (bnxt_re_create_fence_mr(pd))
622 dev_warn(rdev_to_dev(rdev),
623 "Failed to create Fence-MR\n");
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800624 return &pd->ib_pd;
625dbfail:
626 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
627 &pd->qplib_pd);
628fail:
629 kfree(pd);
630 return ERR_PTR(rc);
631}
632
633/* Address Handles */
634int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
635{
636 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
637 struct bnxt_re_dev *rdev = ah->rdev;
638 int rc;
639
640 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
641 if (rc) {
642 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
643 return rc;
644 }
645 kfree(ah);
646 return 0;
647}
648
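/*
 * Create an address handle. A GRH is mandatory for RoCE; the SGID index is
 * halved because the stack keeps both a RoCE v1 and a RoCE v2 entry per GID
 * while the hardware keeps only one.
 */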
649struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400650 struct rdma_ah_attr *ah_attr,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800651 struct ib_udata *udata)
652{
653 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
654 struct bnxt_re_dev *rdev = pd->rdev;
655 struct bnxt_re_ah *ah;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400656 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800657 int rc;
658 u16 vlan_tag;
659 u8 nw_type;
660
661 struct ib_gid_attr sgid_attr;
662
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400663 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800664 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
665 return ERR_PTR(-EINVAL);
666 }
667 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
668 if (!ah)
669 return ERR_PTR(-ENOMEM);
670
671 ah->rdev = rdev;
672 ah->qplib_ah.pd = &pd->qplib_pd;
673
674 /* Supply the configuration for the HW */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400675 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800676 sizeof(union ib_gid));
677 /*
    678	 * If RoCE V2 is enabled, the stack keeps two entries for each
    679	 * GID. Avoid this duplicate entry in HW by dividing the GID
    680	 * index by 2 for RoCE V2.
681 */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400682 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
683 ah->qplib_ah.host_sgid_index = grh->sgid_index;
684 ah->qplib_ah.traffic_class = grh->traffic_class;
685 ah->qplib_ah.flow_label = grh->flow_label;
686 ah->qplib_ah.hop_limit = grh->hop_limit;
687 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800688 if (ib_pd->uobject &&
689 !rdma_is_multicast_addr((struct in6_addr *)
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400690 grh->dgid.raw) &&
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800691 !rdma_link_local_addr((struct in6_addr *)
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400692 grh->dgid.raw)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800693 union ib_gid sgid;
694
695 rc = ib_get_cached_gid(&rdev->ibdev, 1,
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400696 grh->sgid_index, &sgid,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800697 &sgid_attr);
698 if (rc) {
699 dev_err(rdev_to_dev(rdev),
700 "Failed to query gid at index %d",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400701 grh->sgid_index);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800702 goto fail;
703 }
704 if (sgid_attr.ndev) {
705 if (is_vlan_dev(sgid_attr.ndev))
706 vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
707 dev_put(sgid_attr.ndev);
708 }
709 /* Get network header type for this GID */
710 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
711 switch (nw_type) {
712 case RDMA_NETWORK_IPV4:
713 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
714 break;
715 case RDMA_NETWORK_IPV6:
716 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
717 break;
718 default:
719 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
720 break;
721 }
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400722 rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400723 ah_attr->roce.dmac, &vlan_tag,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800724 &sgid_attr.ndev->ifindex,
725 NULL);
726 if (rc) {
727 dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
728 goto fail;
729 }
730 }
731
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400732 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800733 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
734 if (rc) {
735 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
736 goto fail;
737 }
738
739 /* Write AVID to shared page. */
740 if (ib_pd->uobject) {
741 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
742 struct bnxt_re_ucontext *uctx;
743 unsigned long flag;
744 u32 *wrptr;
745
746 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
747 spin_lock_irqsave(&uctx->sh_lock, flag);
748 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
749 *wrptr = ah->qplib_ah.id;
750 wmb(); /* make sure cache is updated. */
751 spin_unlock_irqrestore(&uctx->sh_lock, flag);
752 }
753
754 return &ah->ib_ah;
755
756fail:
757 kfree(ah);
758 return ERR_PTR(rc);
759}
760
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400761int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800762{
763 return 0;
764}
765
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400766int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800767{
768 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
769
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400770 ah_attr->type = ib_ah->type;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400771 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400772 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400773 rdma_ah_set_grh(ah_attr, NULL, 0,
774 ah->qplib_ah.host_sgid_index,
775 0, ah->qplib_ah.traffic_class);
776 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
777 rdma_ah_set_port_num(ah_attr, 1);
778 rdma_ah_set_static_rate(ah_attr, 0);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800779 return 0;
780}
781
782/* Queue Pairs */
783int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
784{
785 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
786 struct bnxt_re_dev *rdev = qp->rdev;
787 int rc;
788
Selvin Xavierf218d672017-06-29 12:28:15 -0700789 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800790 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
791 if (rc) {
792 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
793 return rc;
794 }
795 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
796 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
797 &rdev->sqp_ah->qplib_ah);
798 if (rc) {
799 dev_err(rdev_to_dev(rdev),
800 "Failed to destroy HW AH for shadow QP");
801 return rc;
802 }
803
Selvin Xavierf218d672017-06-29 12:28:15 -0700804 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800805 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
806 &rdev->qp1_sqp->qplib_qp);
807 if (rc) {
808 dev_err(rdev_to_dev(rdev),
809 "Failed to destroy Shadow QP");
810 return rc;
811 }
812 mutex_lock(&rdev->qp_lock);
813 list_del(&rdev->qp1_sqp->list);
814 atomic_dec(&rdev->qp_count);
815 mutex_unlock(&rdev->qp_lock);
816
817 kfree(rdev->sqp_ah);
818 kfree(rdev->qp1_sqp);
819 }
820
Doug Ledford374cb862017-04-25 14:00:59 -0400821 if (!IS_ERR_OR_NULL(qp->rumem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800822 ib_umem_release(qp->rumem);
Doug Ledford374cb862017-04-25 14:00:59 -0400823 if (!IS_ERR_OR_NULL(qp->sumem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800824 ib_umem_release(qp->sumem);
825
826 mutex_lock(&rdev->qp_lock);
827 list_del(&qp->list);
828 atomic_dec(&rdev->qp_count);
829 mutex_unlock(&rdev->qp_lock);
830 kfree(qp);
831 return 0;
832}
833
834static u8 __from_ib_qp_type(enum ib_qp_type type)
835{
836 switch (type) {
837 case IB_QPT_GSI:
838 return CMDQ_CREATE_QP1_TYPE_GSI;
839 case IB_QPT_RC:
840 return CMDQ_CREATE_QP_TYPE_RC;
841 case IB_QPT_UD:
842 return CMDQ_CREATE_QP_TYPE_UD;
843 default:
844 return IB_QPT_MAX;
845 }
846}
847
848static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
849 struct bnxt_re_qp *qp, struct ib_udata *udata)
850{
851 struct bnxt_re_qp_req ureq;
852 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
853 struct ib_umem *umem;
854 int bytes = 0;
855 struct ib_ucontext *context = pd->ib_pd.uobject->context;
856 struct bnxt_re_ucontext *cntx = container_of(context,
857 struct bnxt_re_ucontext,
858 ib_uctx);
859 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
860 return -EFAULT;
861
862 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
863 /* Consider mapping PSN search memory only for RC QPs. */
864 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
865 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
866 bytes = PAGE_ALIGN(bytes);
867 umem = ib_umem_get(context, ureq.qpsva, bytes,
868 IB_ACCESS_LOCAL_WRITE, 1);
869 if (IS_ERR(umem))
870 return PTR_ERR(umem);
871
872 qp->sumem = umem;
873 qplib_qp->sq.sglist = umem->sg_head.sgl;
874 qplib_qp->sq.nmap = umem->nmap;
875 qplib_qp->qp_handle = ureq.qp_handle;
876
877 if (!qp->qplib_qp.srq) {
878 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
879 bytes = PAGE_ALIGN(bytes);
880 umem = ib_umem_get(context, ureq.qprva, bytes,
881 IB_ACCESS_LOCAL_WRITE, 1);
882 if (IS_ERR(umem))
883 goto rqfail;
884 qp->rumem = umem;
885 qplib_qp->rq.sglist = umem->sg_head.sgl;
886 qplib_qp->rq.nmap = umem->nmap;
887 }
888
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700889 qplib_qp->dpi = &cntx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800890 return 0;
891rqfail:
892 ib_umem_release(qp->sumem);
893 qp->sumem = NULL;
894 qplib_qp->sq.sglist = NULL;
895 qplib_qp->sq.nmap = 0;
896
897 return PTR_ERR(umem);
898}
899
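/*
 * Build the AH used by the shadow QP1; the DGID and DMAC simply mirror the
 * local SGID and MAC address.
 */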
900static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
901 (struct bnxt_re_pd *pd,
902 struct bnxt_qplib_res *qp1_res,
903 struct bnxt_qplib_qp *qp1_qp)
904{
905 struct bnxt_re_dev *rdev = pd->rdev;
906 struct bnxt_re_ah *ah;
907 union ib_gid sgid;
908 int rc;
909
910 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
911 if (!ah)
912 return NULL;
913
914 memset(ah, 0, sizeof(*ah));
915 ah->rdev = rdev;
916 ah->qplib_ah.pd = &pd->qplib_pd;
917
918 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
919 if (rc)
920 goto fail;
921
    922	/* Supply the same data for the dgid as for the sgid */
923 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
924 sizeof(union ib_gid));
925 ah->qplib_ah.sgid_index = 0;
926
927 ah->qplib_ah.traffic_class = 0;
928 ah->qplib_ah.flow_label = 0;
929 ah->qplib_ah.hop_limit = 1;
930 ah->qplib_ah.sl = 0;
931 /* Have DMAC same as SMAC */
932 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
933
934 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
935 if (rc) {
936 dev_err(rdev_to_dev(rdev),
937 "Failed to allocate HW AH for Shadow QP");
938 goto fail;
939 }
940
941 return ah;
942
943fail:
944 kfree(ah);
945 return NULL;
946}
947
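/*
 * Create the shadow UD QP that relays QP1 traffic; its queue depths mirror
 * the QP1 receive queue and it shares the QP1 completion queues.
 */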
948static struct bnxt_re_qp *bnxt_re_create_shadow_qp
949 (struct bnxt_re_pd *pd,
950 struct bnxt_qplib_res *qp1_res,
951 struct bnxt_qplib_qp *qp1_qp)
952{
953 struct bnxt_re_dev *rdev = pd->rdev;
954 struct bnxt_re_qp *qp;
955 int rc;
956
957 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
958 if (!qp)
959 return NULL;
960
961 memset(qp, 0, sizeof(*qp));
962 qp->rdev = rdev;
963
964 /* Initialize the shadow QP structure from the QP1 values */
965 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
966
967 qp->qplib_qp.pd = &pd->qplib_pd;
968 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
969 qp->qplib_qp.type = IB_QPT_UD;
970
971 qp->qplib_qp.max_inline_data = 0;
972 qp->qplib_qp.sig_type = true;
973
974 /* Shadow QP SQ depth should be same as QP1 RQ depth */
975 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
976 qp->qplib_qp.sq.max_sge = 2;
Eddie Wai9152e0b2017-06-14 03:26:23 -0700977 /* Q full delta can be 1 since it is internal QP */
978 qp->qplib_qp.sq.q_full_delta = 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800979
980 qp->qplib_qp.scq = qp1_qp->scq;
981 qp->qplib_qp.rcq = qp1_qp->rcq;
982
983 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
984 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
Eddie Wai9152e0b2017-06-14 03:26:23 -0700985 /* Q full delta can be 1 since it is internal QP */
986 qp->qplib_qp.rq.q_full_delta = 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800987
988 qp->qplib_qp.mtu = qp1_qp->mtu;
989
990 qp->qplib_qp.sq_hdr_buf_size = 0;
991 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
992 qp->qplib_qp.dpi = &rdev->dpi_privileged;
993
994 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
995 if (rc)
996 goto fail;
997
998 rdev->sqp_id = qp->qplib_qp.id;
999
1000 spin_lock_init(&qp->sq_lock);
1001 INIT_LIST_HEAD(&qp->list);
1002 mutex_lock(&rdev->qp_lock);
1003 list_add_tail(&qp->list, &rdev->qp_list);
1004 atomic_inc(&rdev->qp_count);
1005 mutex_unlock(&rdev->qp_lock);
1006 return qp;
1007fail:
1008 kfree(qp);
1009 return NULL;
1010}
1011
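/*
 * Create a QP. GSI (QP1) creation additionally sets up a shadow UD QP and AH
 * for relaying QP1 traffic; other QP types reserve extra SQ slots
 * (BNXT_QPLIB_RESERVED_QP_WRS plus a phantom WQE) beyond what the caller
 * requested.
 */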
1012struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1013 struct ib_qp_init_attr *qp_init_attr,
1014 struct ib_udata *udata)
1015{
1016 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1017 struct bnxt_re_dev *rdev = pd->rdev;
1018 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1019 struct bnxt_re_qp *qp;
1020 struct bnxt_re_cq *cq;
1021 int rc, entries;
1022
1023 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1024 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1025 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1026 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1027 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1028 return ERR_PTR(-EINVAL);
1029
1030 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1031 if (!qp)
1032 return ERR_PTR(-ENOMEM);
1033
1034 qp->rdev = rdev;
1035 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1036 qp->qplib_qp.pd = &pd->qplib_pd;
1037 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1038 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1039 if (qp->qplib_qp.type == IB_QPT_MAX) {
1040 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1041 qp->qplib_qp.type);
1042 rc = -EINVAL;
1043 goto fail;
1044 }
1045 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1046 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1047 IB_SIGNAL_ALL_WR) ? true : false);
1048
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001049 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1050 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1051 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1052
1053 if (qp_init_attr->send_cq) {
1054 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1055 ib_cq);
1056 if (!cq) {
1057 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1058 rc = -EINVAL;
1059 goto fail;
1060 }
1061 qp->qplib_qp.scq = &cq->qplib_cq;
1062 }
1063
1064 if (qp_init_attr->recv_cq) {
1065 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1066 ib_cq);
1067 if (!cq) {
1068 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1069 rc = -EINVAL;
1070 goto fail;
1071 }
1072 qp->qplib_qp.rcq = &cq->qplib_cq;
1073 }
1074
1075 if (qp_init_attr->srq) {
1076 dev_err(rdev_to_dev(rdev), "SRQ not supported");
1077 rc = -ENOTSUPP;
1078 goto fail;
1079 } else {
    1080		/* Allocate one more entry than requested so that posting the
    1081		 * maximum number of WRs does not make the queue look empty.
1082 */
1083 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1084 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1085 dev_attr->max_qp_wqes + 1);
1086
Eddie Wai9152e0b2017-06-14 03:26:23 -07001087 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1088 qp_init_attr->cap.max_recv_wr;
1089
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001090 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1091 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1092 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1093 }
1094
1095 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1096
1097 if (qp_init_attr->qp_type == IB_QPT_GSI) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07001098 /* Allocate 1 more than what's provided */
1099 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1100 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1101 dev_attr->max_qp_wqes + 1);
1102 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1103 qp_init_attr->cap.max_send_wr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001104 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1105 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1106 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1107 qp->qplib_qp.sq.max_sge++;
1108 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1109 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1110
1111 qp->qplib_qp.rq_hdr_buf_size =
1112 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1113
1114 qp->qplib_qp.sq_hdr_buf_size =
1115 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1116 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1117 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1118 if (rc) {
1119 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1120 goto fail;
1121 }
1122 /* Create a shadow QP to handle the QP1 traffic */
1123 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1124 &qp->qplib_qp);
1125 if (!rdev->qp1_sqp) {
1126 rc = -EINVAL;
1127 dev_err(rdev_to_dev(rdev),
1128 "Failed to create Shadow QP for QP1");
1129 goto qp_destroy;
1130 }
1131 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1132 &qp->qplib_qp);
1133 if (!rdev->sqp_ah) {
1134 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1135 &rdev->qp1_sqp->qplib_qp);
1136 rc = -EINVAL;
1137 dev_err(rdev_to_dev(rdev),
1138 "Failed to create AH entry for ShadowQP");
1139 goto qp_destroy;
1140 }
1141
1142 } else {
Eddie Wai9152e0b2017-06-14 03:26:23 -07001143 /* Allocate 128 + 1 more than what's provided */
1144 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1145 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1146 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1147 dev_attr->max_qp_wqes +
1148 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1149 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1150
1151 /*
    1152		 * Reserve one slot for the Phantom WQE. The application can
    1153		 * then post one extra entry; this is allowed to avoid an
    1154		 * unexpected queue-full condition.
1155 */
1156
1157 qp->qplib_qp.sq.q_full_delta -= 1;
1158
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001159 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1160 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1161 if (udata) {
1162 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1163 if (rc)
1164 goto fail;
1165 } else {
1166 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1167 }
1168
1169 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1170 if (rc) {
1171 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1172 goto fail;
1173 }
1174 }
1175
1176 qp->ib_qp.qp_num = qp->qplib_qp.id;
1177 spin_lock_init(&qp->sq_lock);
Devesh Sharma018cf592017-05-22 03:15:40 -07001178 spin_lock_init(&qp->rq_lock);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001179
1180 if (udata) {
1181 struct bnxt_re_qp_resp resp;
1182
1183 resp.qpid = qp->ib_qp.qp_num;
1184 resp.rsvd = 0;
1185 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1186 if (rc) {
1187 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1188 goto qp_destroy;
1189 }
1190 }
1191 INIT_LIST_HEAD(&qp->list);
1192 mutex_lock(&rdev->qp_lock);
1193 list_add_tail(&qp->list, &rdev->qp_list);
1194 atomic_inc(&rdev->qp_count);
1195 mutex_unlock(&rdev->qp_lock);
1196
1197 return &qp->ib_qp;
1198qp_destroy:
1199 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1200fail:
1201 kfree(qp);
1202 return ERR_PTR(rc);
1203}
1204
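/*
 * Helpers translating between IB verbs QP state and path-MTU values and the
 * firmware CMDQ_MODIFY_QP encodings.
 */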
1205static u8 __from_ib_qp_state(enum ib_qp_state state)
1206{
1207 switch (state) {
1208 case IB_QPS_RESET:
1209 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1210 case IB_QPS_INIT:
1211 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1212 case IB_QPS_RTR:
1213 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1214 case IB_QPS_RTS:
1215 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1216 case IB_QPS_SQD:
1217 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1218 case IB_QPS_SQE:
1219 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1220 case IB_QPS_ERR:
1221 default:
1222 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1223 }
1224}
1225
1226static enum ib_qp_state __to_ib_qp_state(u8 state)
1227{
1228 switch (state) {
1229 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1230 return IB_QPS_RESET;
1231 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1232 return IB_QPS_INIT;
1233 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1234 return IB_QPS_RTR;
1235 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1236 return IB_QPS_RTS;
1237 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1238 return IB_QPS_SQD;
1239 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1240 return IB_QPS_SQE;
1241 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1242 default:
1243 return IB_QPS_ERR;
1244 }
1245}
1246
1247static u32 __from_ib_mtu(enum ib_mtu mtu)
1248{
1249 switch (mtu) {
1250 case IB_MTU_256:
1251 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1252 case IB_MTU_512:
1253 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1254 case IB_MTU_1024:
1255 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1256 case IB_MTU_2048:
1257 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1258 case IB_MTU_4096:
1259 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1260 default:
1261 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1262 }
1263}
1264
1265static enum ib_mtu __to_ib_mtu(u32 mtu)
1266{
1267 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1268 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1269 return IB_MTU_256;
1270 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1271 return IB_MTU_512;
1272 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1273 return IB_MTU_1024;
1274 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1275 return IB_MTU_2048;
1276 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1277 return IB_MTU_4096;
1278 default:
1279 return IB_MTU_2048;
1280 }
1281}
1282
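/*
 * Mirror the relevant GSI QP attribute changes (state, pkey index, qkey and
 * SQ PSN) onto the shadow QP so the two stay consistent.
 */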
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001283static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1284 struct bnxt_re_qp *qp1_qp,
1285 int qp_attr_mask)
1286{
1287 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1288 int rc = 0;
1289
1290 if (qp_attr_mask & IB_QP_STATE) {
1291 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1292 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1293 }
1294 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1295 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1296 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1297 }
1298
1299 if (qp_attr_mask & IB_QP_QKEY) {
1300 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1301 /* Using a Random QKEY */
1302 qp->qplib_qp.qkey = 0x81818181;
1303 }
1304 if (qp_attr_mask & IB_QP_SQ_PSN) {
1305 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1306 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1307 }
1308
1309 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1310 if (rc)
1311 dev_err(rdev_to_dev(rdev),
1312 "Failed to modify Shadow QP for QP1");
1313 return rc;
1314}
1315
1316int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1317 int qp_attr_mask, struct ib_udata *udata)
1318{
1319 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1320 struct bnxt_re_dev *rdev = qp->rdev;
1321 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1322 enum ib_qp_state curr_qp_state, new_qp_state;
1323 int rc, entries;
1324 int status;
1325 union ib_gid sgid;
1326 struct ib_gid_attr sgid_attr;
1327 u8 nw_type;
1328
1329 qp->qplib_qp.modify_flags = 0;
1330 if (qp_attr_mask & IB_QP_STATE) {
1331 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1332 new_qp_state = qp_attr->qp_state;
1333 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1334 ib_qp->qp_type, qp_attr_mask,
1335 IB_LINK_LAYER_ETHERNET)) {
1336 dev_err(rdev_to_dev(rdev),
1337 "Invalid attribute mask: %#x specified ",
1338 qp_attr_mask);
1339 dev_err(rdev_to_dev(rdev),
1340 "for qpn: %#x type: %#x",
1341 ib_qp->qp_num, ib_qp->qp_type);
1342 dev_err(rdev_to_dev(rdev),
1343 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1344 curr_qp_state, new_qp_state);
1345 return -EINVAL;
1346 }
1347 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1348 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
Selvin Xavierf218d672017-06-29 12:28:15 -07001349
1350 if (!qp->sumem &&
1351 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1352 dev_dbg(rdev_to_dev(rdev),
1353 "Move QP = %p to flush list\n",
1354 qp);
1355 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1356 }
1357 if (!qp->sumem &&
1358 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1359 dev_dbg(rdev_to_dev(rdev),
1360 "Move QP = %p out of flush list\n",
1361 qp);
1362 bnxt_qplib_del_flush_qp(&qp->qplib_qp);
1363 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001364 }
1365 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1366 qp->qplib_qp.modify_flags |=
1367 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1368 qp->qplib_qp.en_sqd_async_notify = true;
1369 }
1370 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1371 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1372 qp->qplib_qp.access =
1373 __from_ib_access_flags(qp_attr->qp_access_flags);
1374 /* LOCAL_WRITE access must be set to allow RC receive */
1375 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1376 }
1377 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1378 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1379 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1380 }
1381 if (qp_attr_mask & IB_QP_QKEY) {
1382 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1383 qp->qplib_qp.qkey = qp_attr->qkey;
1384 }
1385 if (qp_attr_mask & IB_QP_AV) {
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001386 const struct ib_global_route *grh =
1387 rdma_ah_read_grh(&qp_attr->ah_attr);
1388
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001389 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1390 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1391 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1392 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1393 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1394 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1395 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001396 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001397 sizeof(qp->qplib_qp.ah.dgid.data));
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001398 qp->qplib_qp.ah.flow_label = grh->flow_label;
    1399		/* If RoCE V2 is enabled, the stack keeps two entries for each
    1400		 * GID. Avoid this duplicate entry in HW by dividing the GID
    1401		 * index by 2 for RoCE V2.
1402 */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001403 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1404 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1405 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1406 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1407 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001408 ether_addr_copy(qp->qplib_qp.ah.dmac,
1409 qp_attr->ah_attr.roce.dmac);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001410
1411 status = ib_get_cached_gid(&rdev->ibdev, 1,
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001412 grh->sgid_index,
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001413 &sgid, &sgid_attr);
1414 if (!status && sgid_attr.ndev) {
1415 memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1416 ETH_ALEN);
1417 dev_put(sgid_attr.ndev);
1418 nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1419 &sgid);
1420 switch (nw_type) {
1421 case RDMA_NETWORK_IPV4:
1422 qp->qplib_qp.nw_type =
1423 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1424 break;
1425 case RDMA_NETWORK_IPV6:
1426 qp->qplib_qp.nw_type =
1427 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1428 break;
1429 default:
1430 qp->qplib_qp.nw_type =
1431 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1432 break;
1433 }
1434 }
1435 }
1436
1437 if (qp_attr_mask & IB_QP_PATH_MTU) {
1438 qp->qplib_qp.modify_flags |=
1439 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1440 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1441 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1442 qp->qplib_qp.modify_flags |=
1443 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1444 qp->qplib_qp.path_mtu =
1445 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1446 }
1447
1448 if (qp_attr_mask & IB_QP_TIMEOUT) {
1449 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1450 qp->qplib_qp.timeout = qp_attr->timeout;
1451 }
1452 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1453 qp->qplib_qp.modify_flags |=
1454 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1455 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1456 }
1457 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1458 qp->qplib_qp.modify_flags |=
1459 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1460 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1461 }
1462 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1463 qp->qplib_qp.modify_flags |=
1464 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1465 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1466 }
1467 if (qp_attr_mask & IB_QP_RQ_PSN) {
1468 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1469 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1470 }
1471 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1472 qp->qplib_qp.modify_flags |=
1473 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
Eddie Waia25d1122017-06-29 12:28:13 -07001474 /* Cap the max_rd_atomic to device max */
1475 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1476 dev_attr->max_qp_rd_atom);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001477 }
1478 if (qp_attr_mask & IB_QP_SQ_PSN) {
1479 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1480 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1481 }
1482 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
Eddie Waia25d1122017-06-29 12:28:13 -07001483 if (qp_attr->max_dest_rd_atomic >
1484 dev_attr->max_qp_init_rd_atom) {
1485 dev_err(rdev_to_dev(rdev),
1486 "max_dest_rd_atomic requested%d is > dev_max%d",
1487 qp_attr->max_dest_rd_atomic,
1488 dev_attr->max_qp_init_rd_atom);
1489 return -EINVAL;
1490 }
1491
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001492 qp->qplib_qp.modify_flags |=
1493 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1494 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1495 }
1496 if (qp_attr_mask & IB_QP_CAP) {
1497 qp->qplib_qp.modify_flags |=
1498 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1499 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1500 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1501 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1502 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1503 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1504 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1505 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1506 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1507 (qp_attr->cap.max_inline_data >=
1508 dev_attr->max_inline_data)) {
1509 dev_err(rdev_to_dev(rdev),
1510 "Create QP failed - max exceeded");
1511 return -EINVAL;
1512 }
1513 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1514 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1515 dev_attr->max_qp_wqes + 1);
Eddie Wai9152e0b2017-06-14 03:26:23 -07001516 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1517 qp_attr->cap.max_send_wr;
1518 /*
    1519		 * Reserve one slot for the Phantom WQE. Some applications can
    1520		 * post one extra entry in this case; this is allowed to avoid an
    1521		 * unexpected queue-full condition.
1522 */
1523 qp->qplib_qp.sq.q_full_delta -= 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001524 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1525 if (qp->qplib_qp.rq.max_wqe) {
1526 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1527 qp->qplib_qp.rq.max_wqe =
1528 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
Eddie Wai9152e0b2017-06-14 03:26:23 -07001529 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1530 qp_attr->cap.max_recv_wr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001531 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1532 } else {
1533 /* SRQ was used prior, just ignore the RQ caps */
1534 }
1535 }
1536 if (qp_attr_mask & IB_QP_DEST_QPN) {
1537 qp->qplib_qp.modify_flags |=
1538 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1539 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1540 }
1541 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1542 if (rc) {
1543 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1544 return rc;
1545 }
1546 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1547 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1548 return rc;
1549}
1550
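/*
 * Query the QP state from firmware and translate it back to IB verbs
 * attributes; queue capacities are reported from the driver's cached values.
 */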
1551int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1552 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1553{
1554 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1555 struct bnxt_re_dev *rdev = qp->rdev;
1556 struct bnxt_qplib_qp qplib_qp;
1557 int rc;
1558
1559 memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
1560 qplib_qp.id = qp->qplib_qp.id;
1561 qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1562
1563 rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
1564 if (rc) {
1565 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1566 return rc;
1567 }
1568 qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
1569 qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
1570 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
1571 qp_attr->pkey_index = qplib_qp.pkey_index;
1572 qp_attr->qkey = qplib_qp.qkey;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001573 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001574 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
1575 qplib_qp.ah.host_sgid_index,
1576 qplib_qp.ah.hop_limit,
1577 qplib_qp.ah.traffic_class);
1578 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
1579 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001580 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001581 qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
1582 qp_attr->timeout = qplib_qp.timeout;
1583 qp_attr->retry_cnt = qplib_qp.retry_cnt;
1584 qp_attr->rnr_retry = qplib_qp.rnr_retry;
1585 qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
1586 qp_attr->rq_psn = qplib_qp.rq.psn;
1587 qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
1588 qp_attr->sq_psn = qplib_qp.sq.psn;
1589 qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
1590 qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
1591 IB_SIGNAL_REQ_WR;
1592 qp_attr->dest_qp_num = qplib_qp.dest_qpn;
1593
1594 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1595 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1596 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1597 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1598 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1599 qp_init_attr->cap = qp_attr->cap;
1600
1601 return 0;
1602}
1603
   1604/* Routine for sending QP1 packets for RoCE V1 and V2
1605 */
1606static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1607 struct ib_send_wr *wr,
1608 struct bnxt_qplib_swqe *wqe,
1609 int payload_size)
1610{
1611 struct ib_device *ibdev = &qp->rdev->ibdev;
1612 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1613 ib_ah);
1614 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1615 struct bnxt_qplib_sge sge;
1616 union ib_gid sgid;
1617 u8 nw_type;
1618 u16 ether_type;
1619 struct ib_gid_attr sgid_attr;
1620 union ib_gid dgid;
1621 bool is_eth = false;
1622 bool is_vlan = false;
1623 bool is_grh = false;
1624 bool is_udp = false;
1625 u8 ip_version = 0;
1626 u16 vlan_id = 0xFFFF;
1627 void *buf;
1628 int i, rc = 0, size;
1629
1630 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1631
1632 rc = ib_get_cached_gid(ibdev, 1,
1633 qplib_ah->host_sgid_index, &sgid,
1634 &sgid_attr);
1635 if (rc) {
1636 dev_err(rdev_to_dev(qp->rdev),
1637 "Failed to query gid at index %d",
1638 qplib_ah->host_sgid_index);
1639 return rc;
1640 }
1641 if (sgid_attr.ndev) {
1642 if (is_vlan_dev(sgid_attr.ndev))
1643 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1644 dev_put(sgid_attr.ndev);
1645 }
1646 /* Get network header type for this GID */
1647 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1648 switch (nw_type) {
1649 case RDMA_NETWORK_IPV4:
1650 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1651 break;
1652 case RDMA_NETWORK_IPV6:
1653 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1654 break;
1655 default:
1656 nw_type = BNXT_RE_ROCE_V1_PACKET;
1657 break;
1658 }
1659 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1660 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1661 if (is_udp) {
1662 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1663 ip_version = 4;
1664 ether_type = ETH_P_IP;
1665 } else {
1666 ip_version = 6;
1667 ether_type = ETH_P_IPV6;
1668 }
1669 is_grh = false;
1670 } else {
1671 ether_type = ETH_P_IBOE;
1672 is_grh = true;
1673 }
1674
1675 is_eth = true;
 1676	is_vlan = vlan_id && (vlan_id < 0x1000);
1677
1678 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1679 ip_version, is_udp, 0, &qp->qp1_hdr);
1680
1681 /* ETH */
1682 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1683 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1684
1685 /* For vlan, check the sgid for vlan existence */
1686
1687 if (!is_vlan) {
1688 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1689 } else {
1690 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1691 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1692 }
1693
1694 if (is_grh || (ip_version == 6)) {
1695 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1696 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1697 sizeof(sgid));
1698 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1699 }
1700
1701 if (ip_version == 4) {
1702 qp->qp1_hdr.ip4.tos = 0;
1703 qp->qp1_hdr.ip4.id = 0;
1704 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1705 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1706
1707 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1708 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1709 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1710 }
1711
1712 if (is_udp) {
1713 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1714 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1715 qp->qp1_hdr.udp.csum = 0;
1716 }
1717
1718 /* BTH */
1719 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1720 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1721 qp->qp1_hdr.immediate_present = 1;
1722 } else {
1723 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1724 }
1725 if (wr->send_flags & IB_SEND_SOLICITED)
1726 qp->qp1_hdr.bth.solicited_event = 1;
1727 /* pad_count */
1728 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1729
1730 /* P_key for QP1 is for all members */
1731 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1732 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1733 qp->qp1_hdr.bth.ack_req = 0;
1734 qp->send_psn++;
1735 qp->send_psn &= BTH_PSN_MASK;
1736 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1737 /* DETH */
 1738	/* Use the privileged Q_Key for QP1 */
1739 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1740 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1741
1742 /* Pack the QP1 to the transmit buffer */
1743 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1744 if (buf) {
1745 size = ib_ud_header_pack(&qp->qp1_hdr, buf);
1746 for (i = wqe->num_sge; i; i--) {
1747 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1748 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1749 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1750 }
1751
 1752		/*
 1753		 * Max header buf size for IPv6 RoCE V2 is 86, the size of the QP1
 1754		 * SQ header buffer: ETH(14) + VLAN(4) + IPv6(40) + UDP(8) + BTH(20).
 1755		 * Header buf size for IPv4 RoCE V2 is 66:
 1756		 * ETH(14) + VLAN(4) + IPv4(20) + UDP(8) + BTH(20),
 1757		 * so subtract 20 bytes from the QP1 SQ header buf size.
 1758		 */
1759 if (is_udp && ip_version == 4)
1760 sge.size -= 20;
1761 /*
1762 * Max Header buf size for RoCE V1 is 78.
1763 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1764 * Subtract 8 bytes from QP1 SQ header buf size
1765 */
1766 if (!is_udp)
1767 sge.size -= 8;
1768
1769 /* Subtract 4 bytes for non vlan packets */
1770 if (!is_vlan)
1771 sge.size -= 4;
1772
1773 wqe->sg_list[0].addr = sge.addr;
1774 wqe->sg_list[0].lkey = sge.lkey;
1775 wqe->sg_list[0].size = sge.size;
1776 wqe->num_sge++;
1777
1778 } else {
1779 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1780 rc = -ENOMEM;
1781 }
1782 return rc;
1783}
1784
1785/* For the MAD layer, it only provides the recv SGE the size of
1786 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
1787 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire
1788 * receive packet (334 bytes) with no VLAN and then copy the GRH
1789 * and the MAD datagram out to the provided SGE.
1790 */
1791static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1792 struct ib_recv_wr *wr,
1793 struct bnxt_qplib_swqe *wqe,
1794 int payload_size)
1795{
1796 struct bnxt_qplib_sge ref, sge;
1797 u32 rq_prod_index;
1798 struct bnxt_re_sqp_entries *sqp_entry;
1799
1800 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1801
1802 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1803 return -ENOMEM;
1804
1805 /* Create 1 SGE to receive the entire
1806 * ethernet packet
1807 */
1808 /* Save the reference from ULP */
1809 ref.addr = wqe->sg_list[0].addr;
1810 ref.lkey = wqe->sg_list[0].lkey;
1811 ref.size = wqe->sg_list[0].size;
1812
1813 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1814
1815 /* SGE 1 */
1816 wqe->sg_list[0].addr = sge.addr;
1817 wqe->sg_list[0].lkey = sge.lkey;
1818 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1819 sge.size -= wqe->sg_list[0].size;
1820
1821 sqp_entry->sge.addr = ref.addr;
1822 sqp_entry->sge.lkey = ref.lkey;
1823 sqp_entry->sge.size = ref.size;
1824 /* Store the wrid for reporting completion */
1825 sqp_entry->wrid = wqe->wr_id;
1826 /* change the wqe->wrid to table index */
1827 wqe->wr_id = rq_prod_index;
1828 return 0;
1829}
1830
1831static int is_ud_qp(struct bnxt_re_qp *qp)
1832{
1833 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1834}
1835
1836static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1837 struct ib_send_wr *wr,
1838 struct bnxt_qplib_swqe *wqe)
1839{
1840 struct bnxt_re_ah *ah = NULL;
1841
1842 if (is_ud_qp(qp)) {
1843 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1844 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1845 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1846 wqe->send.avid = ah->qplib_ah.id;
1847 }
1848 switch (wr->opcode) {
1849 case IB_WR_SEND:
1850 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1851 break;
1852 case IB_WR_SEND_WITH_IMM:
1853 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1854 wqe->send.imm_data = wr->ex.imm_data;
1855 break;
1856 case IB_WR_SEND_WITH_INV:
1857 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1858 wqe->send.inv_key = wr->ex.invalidate_rkey;
1859 break;
1860 default:
1861 return -EINVAL;
1862 }
1863 if (wr->send_flags & IB_SEND_SIGNALED)
1864 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1865 if (wr->send_flags & IB_SEND_FENCE)
1866 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1867 if (wr->send_flags & IB_SEND_SOLICITED)
1868 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1869 if (wr->send_flags & IB_SEND_INLINE)
1870 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1871
1872 return 0;
1873}
1874
1875static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1876 struct bnxt_qplib_swqe *wqe)
1877{
1878 switch (wr->opcode) {
1879 case IB_WR_RDMA_WRITE:
1880 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1881 break;
1882 case IB_WR_RDMA_WRITE_WITH_IMM:
1883 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1884 wqe->rdma.imm_data = wr->ex.imm_data;
1885 break;
1886 case IB_WR_RDMA_READ:
1887 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1888 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1889 break;
1890 default:
1891 return -EINVAL;
1892 }
1893 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1894 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1895 if (wr->send_flags & IB_SEND_SIGNALED)
1896 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1897 if (wr->send_flags & IB_SEND_FENCE)
1898 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1899 if (wr->send_flags & IB_SEND_SOLICITED)
1900 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1901 if (wr->send_flags & IB_SEND_INLINE)
1902 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1903
1904 return 0;
1905}
1906
1907static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1908 struct bnxt_qplib_swqe *wqe)
1909{
1910 switch (wr->opcode) {
1911 case IB_WR_ATOMIC_CMP_AND_SWP:
1912 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
1913 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1914 break;
1915 case IB_WR_ATOMIC_FETCH_AND_ADD:
1916 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1917 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1918 break;
1919 default:
1920 return -EINVAL;
1921 }
1922 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1923 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1924 if (wr->send_flags & IB_SEND_SIGNALED)
1925 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1926 if (wr->send_flags & IB_SEND_FENCE)
1927 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1928 if (wr->send_flags & IB_SEND_SOLICITED)
1929 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1930 return 0;
1931}
1932
1933static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1934 struct bnxt_qplib_swqe *wqe)
1935{
1936 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1937 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1938
1939 if (wr->send_flags & IB_SEND_SIGNALED)
1940 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1941 if (wr->send_flags & IB_SEND_FENCE)
1942 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1943 if (wr->send_flags & IB_SEND_SOLICITED)
1944 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1945
1946 return 0;
1947}
1948
1949static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1950 struct bnxt_qplib_swqe *wqe)
1951{
1952 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1953 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1954 int access = wr->access;
1955
1956 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1957 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1958 wqe->frmr.page_list = mr->pages;
1959 wqe->frmr.page_list_len = mr->npages;
1960 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1961 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1962
1963 if (wr->wr.send_flags & IB_SEND_FENCE)
1964 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1965 if (wr->wr.send_flags & IB_SEND_SIGNALED)
1966 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1967
1968 if (access & IB_ACCESS_LOCAL_WRITE)
1969 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1970 if (access & IB_ACCESS_REMOTE_READ)
1971 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
1972 if (access & IB_ACCESS_REMOTE_WRITE)
1973 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
1974 if (access & IB_ACCESS_REMOTE_ATOMIC)
1975 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
1976 if (access & IB_ACCESS_MW_BIND)
1977 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
1978
1979 wqe->frmr.l_key = wr->key;
1980 wqe->frmr.length = wr->mr->length;
1981 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
1982 wqe->frmr.va = wr->mr->iova;
1983 return 0;
1984}
1985
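/*
 * Copy the payload of an inline send into wqe->inline_data, SGE by SGE,
 * rejecting requests larger than BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH.
 * Returns the total number of bytes copied.
 */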
1986static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
1987 struct ib_send_wr *wr,
1988 struct bnxt_qplib_swqe *wqe)
1989{
1990 /* Copy the inline data to the data field */
1991 u8 *in_data;
1992 u32 i, sge_len;
1993 void *sge_addr;
1994
1995 in_data = wqe->inline_data;
1996 for (i = 0; i < wr->num_sge; i++) {
1997 sge_addr = (void *)(unsigned long)
1998 wr->sg_list[i].addr;
1999 sge_len = wr->sg_list[i].length;
2000
2001 if ((sge_len + wqe->inline_len) >
2002 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2003 dev_err(rdev_to_dev(rdev),
2004 "Inline data size requested > supported value");
2005 return -EINVAL;
2006 }
2008
2009 memcpy(in_data, sge_addr, sge_len);
2010 in_data += wr->sg_list[i].length;
2011 wqe->inline_len += wr->sg_list[i].length;
2012 }
2013 return wqe->inline_len;
2014}
2015
2016static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2017 struct ib_send_wr *wr,
2018 struct bnxt_qplib_swqe *wqe)
2019{
2020 int payload_sz = 0;
2021
2022 if (wr->send_flags & IB_SEND_INLINE)
2023 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2024 else
2025 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2026 wqe->num_sge);
2027
2028 return payload_sz;
2029}
2030
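/*
 * UD/GSI/raw-Ethernet QP HW stall workaround: once BNXT_RE_UD_QP_HW_STALL
 * WQEs have been posted, nudge the QP with a modify-QP back to RTS and
 * reset the posted-WQE counter.
 */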
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002031static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2032{
2033 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2034 qp->ib_qp.qp_type == IB_QPT_GSI ||
2035 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2036 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2037 int qp_attr_mask;
2038 struct ib_qp_attr qp_attr;
2039
2040 qp_attr_mask = IB_QP_STATE;
2041 qp_attr.qp_state = IB_QPS_RTS;
2042 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2043 qp->qplib_qp.wqe_cnt = 0;
2044 }
2045}
2046
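/*
 * Post send WRs on the internal shadow GSI QP. Only plain sends are built
 * here (used by the QP1 receive relay path); no QP1 header is constructed.
 */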
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002047static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2048 struct bnxt_re_qp *qp,
2049 struct ib_send_wr *wr)
2050{
2051 struct bnxt_qplib_swqe wqe;
2052 int rc = 0, payload_sz = 0;
2053 unsigned long flags;
2054
2055 spin_lock_irqsave(&qp->sq_lock, flags);
2056 memset(&wqe, 0, sizeof(wqe));
2057 while (wr) {
 2058		/* Housekeeping */
2059 memset(&wqe, 0, sizeof(wqe));
2060
2061 /* Common */
2062 wqe.num_sge = wr->num_sge;
2063 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2064 dev_err(rdev_to_dev(rdev),
2065 "Limit exceeded for Send SGEs");
2066 rc = -EINVAL;
2067 goto bad;
2068 }
2069
2070 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2071 if (payload_sz < 0) {
2072 rc = -EINVAL;
2073 goto bad;
2074 }
2075 wqe.wr_id = wr->wr_id;
2076
2077 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2078
2079 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2080 if (!rc)
2081 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2082bad:
2083 if (rc) {
2084 dev_err(rdev_to_dev(rdev),
2085 "Post send failed opcode = %#x rc = %d",
2086 wr->opcode, rc);
2087 break;
2088 }
2089 wr = wr->next;
2090 }
2091 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002092 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002093 spin_unlock_irqrestore(&qp->sq_lock, flags);
2094 return rc;
2095}
2096
2097int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2098 struct ib_send_wr **bad_wr)
2099{
2100 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2101 struct bnxt_qplib_swqe wqe;
2102 int rc = 0, payload_sz = 0;
2103 unsigned long flags;
2104
2105 spin_lock_irqsave(&qp->sq_lock, flags);
2106 while (wr) {
 2107		/* Housekeeping */
2108 memset(&wqe, 0, sizeof(wqe));
2109
2110 /* Common */
2111 wqe.num_sge = wr->num_sge;
2112 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2113 dev_err(rdev_to_dev(qp->rdev),
2114 "Limit exceeded for Send SGEs");
2115 rc = -EINVAL;
2116 goto bad;
2117 }
2118
2119 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2120 if (payload_sz < 0) {
2121 rc = -EINVAL;
2122 goto bad;
2123 }
2124 wqe.wr_id = wr->wr_id;
2125
2126 switch (wr->opcode) {
2127 case IB_WR_SEND:
2128 case IB_WR_SEND_WITH_IMM:
2129 if (ib_qp->qp_type == IB_QPT_GSI) {
2130 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2131 payload_sz);
2132 if (rc)
2133 goto bad;
2134 wqe.rawqp1.lflags |=
2135 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2136 }
2137 switch (wr->send_flags) {
2138 case IB_SEND_IP_CSUM:
2139 wqe.rawqp1.lflags |=
2140 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2141 break;
2142 default:
2143 break;
2144 }
2145 /* Fall thru to build the wqe */
2146 case IB_WR_SEND_WITH_INV:
2147 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2148 break;
2149 case IB_WR_RDMA_WRITE:
2150 case IB_WR_RDMA_WRITE_WITH_IMM:
2151 case IB_WR_RDMA_READ:
2152 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2153 break;
2154 case IB_WR_ATOMIC_CMP_AND_SWP:
2155 case IB_WR_ATOMIC_FETCH_AND_ADD:
2156 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2157 break;
2158 case IB_WR_RDMA_READ_WITH_INV:
2159 dev_err(rdev_to_dev(qp->rdev),
2160 "RDMA Read with Invalidate is not supported");
2161 rc = -EINVAL;
2162 goto bad;
2163 case IB_WR_LOCAL_INV:
2164 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2165 break;
2166 case IB_WR_REG_MR:
2167 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2168 break;
2169 default:
2170 /* Unsupported WRs */
2171 dev_err(rdev_to_dev(qp->rdev),
2172 "WR (%#x) is not supported", wr->opcode);
2173 rc = -EINVAL;
2174 goto bad;
2175 }
2176 if (!rc)
2177 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2178bad:
2179 if (rc) {
2180 dev_err(rdev_to_dev(qp->rdev),
2181 "post_send failed op:%#x qps = %#x rc = %d\n",
2182 wr->opcode, qp->qplib_qp.state, rc);
2183 *bad_wr = wr;
2184 break;
2185 }
2186 wr = wr->next;
2187 }
2188 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002189 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002190 spin_unlock_irqrestore(&qp->sq_lock, flags);
2191
2192 return rc;
2193}
2194
2195static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2196 struct bnxt_re_qp *qp,
2197 struct ib_recv_wr *wr)
2198{
2199 struct bnxt_qplib_swqe wqe;
2200 int rc = 0, payload_sz = 0;
2201
2202 memset(&wqe, 0, sizeof(wqe));
2203 while (wr) {
 2204		/* Housekeeping */
2205 memset(&wqe, 0, sizeof(wqe));
2206
2207 /* Common */
2208 wqe.num_sge = wr->num_sge;
2209 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2210 dev_err(rdev_to_dev(rdev),
2211 "Limit exceeded for Receive SGEs");
2212 rc = -EINVAL;
2213 break;
2214 }
2215 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2216 wr->num_sge);
2217 wqe.wr_id = wr->wr_id;
2218 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2219
2220 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2221 if (rc)
2222 break;
2223
2224 wr = wr->next;
2225 }
2226 if (!rc)
2227 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2228 return rc;
2229}
2230
2231int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2232 struct ib_recv_wr **bad_wr)
2233{
2234 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2235 struct bnxt_qplib_swqe wqe;
2236 int rc = 0, payload_sz = 0;
Devesh Sharma018cf592017-05-22 03:15:40 -07002237 unsigned long flags;
2238 u32 count = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002239
Devesh Sharma018cf592017-05-22 03:15:40 -07002240 spin_lock_irqsave(&qp->rq_lock, flags);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002241 while (wr) {
 2242		/* Housekeeping */
2243 memset(&wqe, 0, sizeof(wqe));
2244
2245 /* Common */
2246 wqe.num_sge = wr->num_sge;
2247 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2248 dev_err(rdev_to_dev(qp->rdev),
2249 "Limit exceeded for Receive SGEs");
2250 rc = -EINVAL;
2251 *bad_wr = wr;
2252 break;
2253 }
2254
2255 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2256 wr->num_sge);
2257 wqe.wr_id = wr->wr_id;
2258 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2259
2260 if (ib_qp->qp_type == IB_QPT_GSI)
2261 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2262 payload_sz);
2263 if (!rc)
2264 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2265 if (rc) {
2266 *bad_wr = wr;
2267 break;
2268 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002269
2270 /* Ring DB if the RQEs posted reaches a threshold value */
2271 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2272 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2273 count = 0;
2274 }
2275
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002276 wr = wr->next;
2277 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002278
2279 if (count)
2280 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2281
2282 spin_unlock_irqrestore(&qp->rq_lock, flags);
2283
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002284 return rc;
2285}
2286
2287/* Completion Queues */
2288int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2289{
2290 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2291 struct bnxt_re_dev *rdev = cq->rdev;
2292 int rc;
2293
2294 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2295 if (rc) {
2296 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2297 return rc;
2298 }
Doug Ledford374cb862017-04-25 14:00:59 -04002299 if (!IS_ERR_OR_NULL(cq->umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002300 ib_umem_release(cq->umem);
2301
 2303	kfree(cq->cql);
 2304	kfree(cq);
2306 atomic_dec(&rdev->cq_count);
2307 rdev->nq.budget--;
2308 return 0;
2309}
2310
2311struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2312 const struct ib_cq_init_attr *attr,
2313 struct ib_ucontext *context,
2314 struct ib_udata *udata)
2315{
2316 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2317 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2318 struct bnxt_re_cq *cq = NULL;
2319 int rc, entries;
2320 int cqe = attr->cqe;
2321
2322 /* Validate CQ fields */
2323 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
 2324		dev_err(rdev_to_dev(rdev), "Failed to create CQ: CQE count exceeds max");
2325 return ERR_PTR(-EINVAL);
2326 }
2327 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2328 if (!cq)
2329 return ERR_PTR(-ENOMEM);
2330
2331 cq->rdev = rdev;
2332 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2333
2334 entries = roundup_pow_of_two(cqe + 1);
2335 if (entries > dev_attr->max_cq_wqes + 1)
2336 entries = dev_attr->max_cq_wqes + 1;
2337
2338 if (context) {
2339 struct bnxt_re_cq_req req;
2340 struct bnxt_re_ucontext *uctx = container_of
2341 (context,
2342 struct bnxt_re_ucontext,
2343 ib_uctx);
2344 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2345 rc = -EFAULT;
2346 goto fail;
2347 }
2348
2349 cq->umem = ib_umem_get(context, req.cq_va,
2350 entries * sizeof(struct cq_base),
2351 IB_ACCESS_LOCAL_WRITE, 1);
2352 if (IS_ERR(cq->umem)) {
2353 rc = PTR_ERR(cq->umem);
2354 goto fail;
2355 }
2356 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2357 cq->qplib_cq.nmap = cq->umem->nmap;
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07002358 cq->qplib_cq.dpi = &uctx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002359 } else {
2360 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2361 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2362 GFP_KERNEL);
2363 if (!cq->cql) {
2364 rc = -ENOMEM;
2365 goto fail;
2366 }
2367
2368 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2369 cq->qplib_cq.sghead = NULL;
2370 cq->qplib_cq.nmap = 0;
2371 }
2372 cq->qplib_cq.max_wqe = entries;
2373 cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;
Selvin Xavierf218d672017-06-29 12:28:15 -07002374 cq->qplib_cq.nq = &rdev->nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002375
2376 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2377 if (rc) {
2378 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2379 goto fail;
2380 }
2381
2382 cq->ib_cq.cqe = entries;
2383 cq->cq_period = cq->qplib_cq.period;
2384 rdev->nq.budget++;
2385
2386 atomic_inc(&rdev->cq_count);
2387
2388 if (context) {
2389 struct bnxt_re_cq_resp resp;
2390
2391 resp.cqid = cq->qplib_cq.id;
2392 resp.tail = cq->qplib_cq.hwq.cons;
2393 resp.phase = cq->qplib_cq.period;
2394 resp.rsvd = 0;
2395 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2396 if (rc) {
2397 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2398 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2399 goto c2fail;
2400 }
2401 }
2402
2403 return &cq->ib_cq;
2404
2405c2fail:
2406 if (context)
2407 ib_umem_release(cq->umem);
2408fail:
2409 kfree(cq->cql);
2410 kfree(cq);
2411 return ERR_PTR(rc);
2412}
2413
2414static u8 __req_to_ib_wc_status(u8 qstatus)
2415{
2416 switch (qstatus) {
2417 case CQ_REQ_STATUS_OK:
2418 return IB_WC_SUCCESS;
2419 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2420 return IB_WC_BAD_RESP_ERR;
2421 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2422 return IB_WC_LOC_LEN_ERR;
2423 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2424 return IB_WC_LOC_QP_OP_ERR;
2425 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2426 return IB_WC_LOC_PROT_ERR;
2427 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2428 return IB_WC_GENERAL_ERR;
2429 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2430 return IB_WC_REM_INV_REQ_ERR;
2431 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2432 return IB_WC_REM_ACCESS_ERR;
2433 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2434 return IB_WC_REM_OP_ERR;
2435 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2436 return IB_WC_RNR_RETRY_EXC_ERR;
2437 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2438 return IB_WC_RETRY_EXC_ERR;
2439 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2440 return IB_WC_WR_FLUSH_ERR;
2441 default:
2442 return IB_WC_GENERAL_ERR;
2443 }
2444 return 0;
2445}
2446
2447static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2448{
2449 switch (qstatus) {
2450 case CQ_RES_RAWETH_QP1_STATUS_OK:
2451 return IB_WC_SUCCESS;
2452 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2453 return IB_WC_LOC_ACCESS_ERR;
2454 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2455 return IB_WC_LOC_LEN_ERR;
2456 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2457 return IB_WC_LOC_PROT_ERR;
2458 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2459 return IB_WC_LOC_QP_OP_ERR;
2460 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2461 return IB_WC_GENERAL_ERR;
2462 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2463 return IB_WC_WR_FLUSH_ERR;
2464 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2465 return IB_WC_WR_FLUSH_ERR;
2466 default:
2467 return IB_WC_GENERAL_ERR;
2468 }
2469}
2470
2471static u8 __rc_to_ib_wc_status(u8 qstatus)
2472{
2473 switch (qstatus) {
2474 case CQ_RES_RC_STATUS_OK:
2475 return IB_WC_SUCCESS;
2476 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2477 return IB_WC_LOC_ACCESS_ERR;
2478 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2479 return IB_WC_LOC_LEN_ERR;
2480 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2481 return IB_WC_LOC_PROT_ERR;
2482 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2483 return IB_WC_LOC_QP_OP_ERR;
2484 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2485 return IB_WC_GENERAL_ERR;
2486 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2487 return IB_WC_REM_INV_REQ_ERR;
2488 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2489 return IB_WC_WR_FLUSH_ERR;
2490 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2491 return IB_WC_WR_FLUSH_ERR;
2492 default:
2493 return IB_WC_GENERAL_ERR;
2494 }
2495}
2496
2497static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2498{
2499 switch (cqe->type) {
2500 case BNXT_QPLIB_SWQE_TYPE_SEND:
2501 wc->opcode = IB_WC_SEND;
2502 break;
2503 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2504 wc->opcode = IB_WC_SEND;
2505 wc->wc_flags |= IB_WC_WITH_IMM;
2506 break;
2507 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2508 wc->opcode = IB_WC_SEND;
2509 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2510 break;
2511 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2512 wc->opcode = IB_WC_RDMA_WRITE;
2513 break;
2514 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2515 wc->opcode = IB_WC_RDMA_WRITE;
2516 wc->wc_flags |= IB_WC_WITH_IMM;
2517 break;
2518 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2519 wc->opcode = IB_WC_RDMA_READ;
2520 break;
2521 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2522 wc->opcode = IB_WC_COMP_SWAP;
2523 break;
2524 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2525 wc->opcode = IB_WC_FETCH_ADD;
2526 break;
2527 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2528 wc->opcode = IB_WC_LOCAL_INV;
2529 break;
2530 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2531 wc->opcode = IB_WC_REG_MR;
2532 break;
2533 default:
2534 wc->opcode = IB_WC_SEND;
2535 break;
2536 }
2537
2538 wc->status = __req_to_ib_wc_status(cqe->status);
2539}
2540
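/*
 * Classify a raw QP1 completion from its raweth flags: returns
 * BNXT_RE_ROCE_V1_PACKET, BNXT_RE_ROCEV2_IPV4_PACKET or
 * BNXT_RE_ROCEV2_IPV6_PACKET, or -1 if the frame is not a RoCE packet.
 */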
2541static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2542 u16 raweth_qp1_flags2)
2543{
2544 bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
2545
2546 /* raweth_qp1_flags Bit 9-6 indicates itype */
2547 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2548 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2549 return -1;
2550
2551 if (raweth_qp1_flags2 &
2552 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2553 raweth_qp1_flags2 &
2554 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2555 is_udp = true;
2556 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
 2557		if (raweth_qp1_flags2 &
 2558		    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE)
 2559			is_ipv6 = true;
 2560		else
 2561			is_ipv4 = true;
2560 return ((is_ipv6) ?
2561 BNXT_RE_ROCEV2_IPV6_PACKET :
2562 BNXT_RE_ROCEV2_IPV4_PACKET);
2563 } else {
2564 return BNXT_RE_ROCE_V1_PACKET;
2565 }
2566}
2567
2568static int bnxt_re_to_ib_nw_type(int nw_type)
2569{
2570 u8 nw_hdr_type = 0xFF;
2571
2572 switch (nw_type) {
2573 case BNXT_RE_ROCE_V1_PACKET:
2574 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2575 break;
2576 case BNXT_RE_ROCEV2_IPV4_PACKET:
2577 nw_hdr_type = RDMA_NETWORK_IPV4;
2578 break;
2579 case BNXT_RE_ROCEV2_IPV6_PACKET:
2580 nw_hdr_type = RDMA_NETWORK_IPV6;
2581 break;
2582 }
2583 return nw_hdr_type;
2584}
2585
2586static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2587 void *rq_hdr_buf)
2588{
2589 u8 *tmp_buf = NULL;
2590 struct ethhdr *eth_hdr;
2591 u16 eth_type;
2592 bool rc = false;
2593
2594 tmp_buf = (u8 *)rq_hdr_buf;
2595 /*
2596 * If dest mac is not same as I/F mac, this could be a
2597 * loopback address or multicast address, check whether
2598 * it is a loopback packet
2599 */
2600 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2601 tmp_buf += 4;
2602 /* Check the ether type */
2603 eth_hdr = (struct ethhdr *)tmp_buf;
2604 eth_type = ntohs(eth_hdr->h_proto);
2605 switch (eth_type) {
2606 case ETH_P_IBOE:
2607 rc = true;
2608 break;
2609 case ETH_P_IP:
2610 case ETH_P_IPV6: {
2611 u32 len;
2612 struct udphdr *udp_hdr;
2613
2614 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2615 sizeof(struct ipv6hdr));
2616 tmp_buf += sizeof(struct ethhdr) + len;
2617 udp_hdr = (struct udphdr *)tmp_buf;
2618 if (ntohs(udp_hdr->dest) ==
2619 ROCE_V2_UDP_DPORT)
2620 rc = true;
2621 break;
2622 }
2623 default:
2624 break;
2625 }
2626 }
2627
2628 return rc;
2629}
2630
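/*
 * Relay a packet received on the real QP1 to the shadow GSI QP: save the
 * original CQE in the SQP table, post a receive on the shadow QP that
 * lands the GRH and payload in the ULP's buffer, then re-send the packet
 * (minus the Ethernet header) to the shadow QP.
 */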
2631static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2632 struct bnxt_qplib_cqe *cqe)
2633{
2634 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2635 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2636 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2637 struct ib_send_wr *swr;
2638 struct ib_ud_wr udwr;
2639 struct ib_recv_wr rwr;
2640 int pkt_type = 0;
2641 u32 tbl_idx;
2642 void *rq_hdr_buf;
2643 dma_addr_t rq_hdr_buf_map;
2644 dma_addr_t shrq_hdr_buf_map;
2645 u32 offset = 0;
2646 u32 skip_bytes = 0;
2647 struct ib_sge s_sge[2];
2648 struct ib_sge r_sge[2];
2649 int rc;
2650
2651 memset(&udwr, 0, sizeof(udwr));
2652 memset(&rwr, 0, sizeof(rwr));
2653 memset(&s_sge, 0, sizeof(s_sge));
2654 memset(&r_sge, 0, sizeof(r_sge));
2655
2656 swr = &udwr.wr;
2657 tbl_idx = cqe->wr_id;
2658
2659 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2660 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2661 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2662 tbl_idx);
2663
2664 /* Shadow QP header buffer */
2665 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2666 tbl_idx);
2667 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2668
2669 /* Store this cqe */
2670 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2671 sqp_entry->qp1_qp = qp1_qp;
2672
2673 /* Find packet type from the cqe */
2674
2675 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2676 cqe->raweth_qp1_flags2);
2677 if (pkt_type < 0) {
2678 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2679 return -EINVAL;
2680 }
2681
2682 /* Adjust the offset for the user buffer and post in the rq */
2683
2684 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2685 offset = 20;
2686
2687 /*
2688 * QP1 loopback packet has 4 bytes of internal header before
2689 * ether header. Skip these four bytes.
2690 */
2691 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2692 skip_bytes = 4;
2693
 2694	/* First send SGE. Skip the ether header */
2695 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2696 + skip_bytes;
2697 s_sge[0].lkey = 0xFFFFFFFF;
2698 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2699 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2700
2701 /* Second Send SGE */
2702 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2703 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2704 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2705 s_sge[1].addr += 8;
2706 s_sge[1].lkey = 0xFFFFFFFF;
2707 s_sge[1].length = 256;
2708
2709 /* First recv SGE */
2710
2711 r_sge[0].addr = shrq_hdr_buf_map;
2712 r_sge[0].lkey = 0xFFFFFFFF;
2713 r_sge[0].length = 40;
2714
2715 r_sge[1].addr = sqp_entry->sge.addr + offset;
2716 r_sge[1].lkey = sqp_entry->sge.lkey;
2717 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2718
2719 /* Create receive work request */
2720 rwr.num_sge = 2;
2721 rwr.sg_list = r_sge;
2722 rwr.wr_id = tbl_idx;
2723 rwr.next = NULL;
2724
2725 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2726 if (rc) {
2727 dev_err(rdev_to_dev(rdev),
2728 "Failed to post Rx buffers to shadow QP");
2729 return -ENOMEM;
2730 }
2731
2732 swr->num_sge = 2;
2733 swr->sg_list = s_sge;
2734 swr->wr_id = tbl_idx;
2735 swr->opcode = IB_WR_SEND;
2736 swr->next = NULL;
2737
2738 udwr.ah = &rdev->sqp_ah->ib_ah;
2739 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2740 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2741
2742 /* post data received in the send queue */
2743 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2744
2745 return 0;
2746}
2747
2748static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2749 struct bnxt_qplib_cqe *cqe)
2750{
2751 wc->opcode = IB_WC_RECV;
2752 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2753 wc->wc_flags |= IB_WC_GRH;
2754}
2755
2756static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2757 struct bnxt_qplib_cqe *cqe)
2758{
2759 wc->opcode = IB_WC_RECV;
2760 wc->status = __rc_to_ib_wc_status(cqe->status);
2761
2762 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2763 wc->wc_flags |= IB_WC_WITH_IMM;
2764 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2765 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2766 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2767 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2768 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2769}
2770
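/*
 * A completion on the shadow GSI QP carries the SQP table index in wr_id.
 * Rebuild the work completion from the original QP1 CQE saved by
 * bnxt_re_process_raw_qp_pkt_rx() and report it against the real GSI QP
 * with the ULP's original wr_id.
 */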
2771static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2772 struct ib_wc *wc,
2773 struct bnxt_qplib_cqe *cqe)
2774{
2775 u32 tbl_idx;
2776 struct bnxt_re_dev *rdev = qp->rdev;
2777 struct bnxt_re_qp *qp1_qp = NULL;
2778 struct bnxt_qplib_cqe *orig_cqe = NULL;
2779 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2780 int nw_type;
2781
2782 tbl_idx = cqe->wr_id;
2783
2784 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2785 qp1_qp = sqp_entry->qp1_qp;
2786 orig_cqe = &sqp_entry->cqe;
2787
2788 wc->wr_id = sqp_entry->wrid;
2789 wc->byte_len = orig_cqe->length;
2790 wc->qp = &qp1_qp->ib_qp;
2791
2792 wc->ex.imm_data = orig_cqe->immdata;
2793 wc->src_qp = orig_cqe->src_qp;
2794 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2795 wc->port_num = 1;
2796 wc->vendor_err = orig_cqe->status;
2797
2798 wc->opcode = IB_WC_RECV;
2799 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2800 wc->wc_flags |= IB_WC_GRH;
2801
2802 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2803 orig_cqe->raweth_qp1_flags2);
2804 if (nw_type >= 0) {
2805 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2806 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2807 }
2808}
2809
2810static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2811 struct bnxt_qplib_cqe *cqe)
2812{
2813 wc->opcode = IB_WC_RECV;
2814 wc->status = __rc_to_ib_wc_status(cqe->status);
2815
2816 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2817 wc->wc_flags |= IB_WC_WITH_IMM;
2818 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2819 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2820 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2821 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2822 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2823}
2824
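/*
 * Post a fence memory-window bind (bnxt_re_bind_fence_mw()) as a "phantom"
 * WQE on the send queue and account for it in phantom_wqe_cnt. Called from
 * bnxt_re_poll_cq() when the library sets sq->send_phantom.
 */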
Eddie Wai9152e0b2017-06-14 03:26:23 -07002825static int send_phantom_wqe(struct bnxt_re_qp *qp)
2826{
2827 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2828 unsigned long flags;
2829 int rc = 0;
2830
2831 spin_lock_irqsave(&qp->sq_lock, flags);
2832
2833 rc = bnxt_re_bind_fence_mw(lib_qp);
2834 if (!rc) {
2835 lib_qp->sq.phantom_wqe_cnt++;
2836 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2837 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2838 lib_qp->id, lib_qp->sq.hwq.prod,
2839 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2840 lib_qp->sq.phantom_wqe_cnt);
2841 }
2842
2843 spin_unlock_irqrestore(&qp->sq_lock, flags);
2844 return rc;
2845}
2846
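/*
 * Poll up to num_entries completions, bounded by the per-CQ CQL size.
 * Handles phantom-WQE requests from the library, drains the flush list
 * when the budget is not filled, and converts raw QP1 and shadow GSI
 * completions before reporting them to the ULP.
 */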
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002847int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2848{
2849 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2850 struct bnxt_re_qp *qp;
2851 struct bnxt_qplib_cqe *cqe;
2852 int i, ncqe, budget;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002853 struct bnxt_qplib_q *sq;
2854 struct bnxt_qplib_qp *lib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002855 u32 tbl_idx;
2856 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2857 unsigned long flags;
2858
2859 spin_lock_irqsave(&cq->cq_lock, flags);
2860 budget = min_t(u32, num_entries, cq->max_cql);
Devesh Sharma10d1ded2017-06-29 12:28:17 -07002861 num_entries = budget;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002862 if (!cq->cql) {
2863 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2864 goto exit;
2865 }
2866 cqe = &cq->cql[0];
2867 while (budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002868 lib_qp = NULL;
2869 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2870 if (lib_qp) {
2871 sq = &lib_qp->sq;
2872 if (sq->send_phantom) {
2873 qp = container_of(lib_qp,
2874 struct bnxt_re_qp, qplib_qp);
2875 if (send_phantom_wqe(qp) == -ENOMEM)
2876 dev_err(rdev_to_dev(cq->rdev),
2877 "Phantom failed! Scheduled to send again\n");
2878 else
2879 sq->send_phantom = false;
2880 }
2881 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002882 if (ncqe < budget)
2883 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
2884 cqe + ncqe,
2885 budget - ncqe);
Eddie Wai9152e0b2017-06-14 03:26:23 -07002886
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002887 if (!ncqe)
2888 break;
2889
2890 for (i = 0; i < ncqe; i++, cqe++) {
 2891			/* Transcribe each qplib_cqe back to ib_wc */
2892 memset(wc, 0, sizeof(*wc));
2893
2894 wc->wr_id = cqe->wr_id;
2895 wc->byte_len = cqe->length;
2896 qp = container_of
2897 ((struct bnxt_qplib_qp *)
2898 (unsigned long)(cqe->qp_handle),
2899 struct bnxt_re_qp, qplib_qp);
2900 if (!qp) {
2901 dev_err(rdev_to_dev(cq->rdev),
2902 "POLL CQ : bad QP handle");
2903 continue;
2904 }
2905 wc->qp = &qp->ib_qp;
2906 wc->ex.imm_data = cqe->immdata;
2907 wc->src_qp = cqe->src_qp;
2908 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2909 wc->port_num = 1;
2910 wc->vendor_err = cqe->status;
2911
2912 switch (cqe->opcode) {
2913 case CQ_BASE_CQE_TYPE_REQ:
2914 if (qp->qplib_qp.id ==
2915 qp->rdev->qp1_sqp->qplib_qp.id) {
2916 /* Handle this completion with
2917 * the stored completion
2918 */
2919 memset(wc, 0, sizeof(*wc));
2920 continue;
2921 }
2922 bnxt_re_process_req_wc(wc, cqe);
2923 break;
2924 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2925 if (!cqe->status) {
2926 int rc = 0;
2927
2928 rc = bnxt_re_process_raw_qp_pkt_rx
2929 (qp, cqe);
2930 if (!rc) {
2931 memset(wc, 0, sizeof(*wc));
2932 continue;
2933 }
2934 cqe->status = -1;
2935 }
2936 /* Errors need not be looped back.
2937 * But change the wr_id to the one
2938 * stored in the table
2939 */
2940 tbl_idx = cqe->wr_id;
2941 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2942 wc->wr_id = sqp_entry->wrid;
2943 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2944 break;
2945 case CQ_BASE_CQE_TYPE_RES_RC:
2946 bnxt_re_process_res_rc_wc(wc, cqe);
2947 break;
2948 case CQ_BASE_CQE_TYPE_RES_UD:
2949 if (qp->qplib_qp.id ==
2950 qp->rdev->qp1_sqp->qplib_qp.id) {
2951 /* Handle this completion with
2952 * the stored completion
2953 */
2954 if (cqe->status) {
2955 continue;
2956 } else {
2957 bnxt_re_process_res_shadow_qp_wc
2958 (qp, wc, cqe);
2959 break;
2960 }
2961 }
2962 bnxt_re_process_res_ud_wc(wc, cqe);
2963 break;
2964 default:
2965 dev_err(rdev_to_dev(cq->rdev),
2966 "POLL CQ : type 0x%x not handled",
2967 cqe->opcode);
2968 continue;
2969 }
2970 wc++;
2971 budget--;
2972 }
2973 }
2974exit:
2975 spin_unlock_irqrestore(&cq->cq_lock, flags);
2976 return num_entries - budget;
2977}
2978
2979int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
2980 enum ib_cq_notify_flags ib_cqn_flags)
2981{
2982 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2983 int type = 0;
2984
2985 /* Trigger on the very next completion */
2986 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
2987 type = DBR_DBR_TYPE_CQ_ARMALL;
2988 /* Trigger on the next solicited completion */
2989 else if (ib_cqn_flags & IB_CQ_SOLICITED)
2990 type = DBR_DBR_TYPE_CQ_ARMSE;
2991
Selvin Xavier499e4562017-06-29 12:28:18 -07002992 /* Poll to see if there are missed events */
2993 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
2994 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
2995 return 1;
2996
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002997 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
2998
2999 return 0;
3000}
3001
3002/* Memory Regions */
3003struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3004{
3005 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3006 struct bnxt_re_dev *rdev = pd->rdev;
3007 struct bnxt_re_mr *mr;
3008 u64 pbl = 0;
3009 int rc;
3010
3011 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3012 if (!mr)
3013 return ERR_PTR(-ENOMEM);
3014
3015 mr->rdev = rdev;
3016 mr->qplib_mr.pd = &pd->qplib_pd;
3017 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3018 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3019
3020 /* Allocate and register 0 as the address */
3021 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3022 if (rc)
3023 goto fail;
3024
3025 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
 3026	mr->qplib_mr.total_size = -1; /* Infinite length */
3027 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
3028 if (rc)
3029 goto fail_mr;
3030
3031 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3032 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3033 IB_ACCESS_REMOTE_ATOMIC))
3034 mr->ib_mr.rkey = mr->ib_mr.lkey;
3035 atomic_inc(&rdev->mr_count);
3036
3037 return &mr->ib_mr;
3038
3039fail_mr:
3040 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3041fail:
3042 kfree(mr);
3043 return ERR_PTR(rc);
3044}
3045
3046int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3047{
3048 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3049 struct bnxt_re_dev *rdev = mr->rdev;
Colin Ian Kingebbd1df2017-02-17 15:35:22 +00003050 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003051
Selvin Xavier1c980b02017-05-22 03:15:34 -07003052 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3053 if (rc) {
3054 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3055 return rc;
3056 }
3057
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003058 if (mr->npages && mr->pages) {
3059 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3060 &mr->qplib_frpl);
3061 kfree(mr->pages);
3062 mr->npages = 0;
3063 mr->pages = NULL;
3064 }
Doug Ledford374cb862017-04-25 14:00:59 -04003065 if (!IS_ERR_OR_NULL(mr->ib_umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003066 ib_umem_release(mr->ib_umem);
3067
3068 kfree(mr);
3069 atomic_dec(&rdev->mr_count);
3070 return rc;
3071}
3072
3073static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3074{
3075 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3076
3077 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3078 return -ENOMEM;
3079
3080 mr->pages[mr->npages++] = addr;
3081 return 0;
3082}
3083
3084int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3085 unsigned int *sg_offset)
3086{
3087 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3088
3089 mr->npages = 0;
3090 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3091}
3092
3093struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3094 u32 max_num_sg)
3095{
3096 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3097 struct bnxt_re_dev *rdev = pd->rdev;
3098 struct bnxt_re_mr *mr = NULL;
3099 int rc;
3100
3101 if (type != IB_MR_TYPE_MEM_REG) {
3102 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3103 return ERR_PTR(-EINVAL);
3104 }
3105 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3106 return ERR_PTR(-EINVAL);
3107
3108 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3109 if (!mr)
3110 return ERR_PTR(-ENOMEM);
3111
3112 mr->rdev = rdev;
3113 mr->qplib_mr.pd = &pd->qplib_pd;
3114 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3115 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3116
3117 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3118 if (rc)
3119 goto fail;
3120
3121 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3122 mr->ib_mr.rkey = mr->ib_mr.lkey;
3123
3124 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3125 if (!mr->pages) {
3126 rc = -ENOMEM;
3127 goto fail;
3128 }
3129 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3130 &mr->qplib_frpl, max_num_sg);
3131 if (rc) {
3132 dev_err(rdev_to_dev(rdev),
3133 "Failed to allocate HW FR page list");
3134 goto fail_mr;
3135 }
3136
3137 atomic_inc(&rdev->mr_count);
3138 return &mr->ib_mr;
3139
3140fail_mr:
3141 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3142fail:
3143 kfree(mr->pages);
3144 kfree(mr);
3145 return ERR_PTR(rc);
3146}
3147
Eddie Wai9152e0b2017-06-14 03:26:23 -07003148struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3149 struct ib_udata *udata)
3150{
3151 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3152 struct bnxt_re_dev *rdev = pd->rdev;
3153 struct bnxt_re_mw *mw;
3154 int rc;
3155
3156 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3157 if (!mw)
3158 return ERR_PTR(-ENOMEM);
3159 mw->rdev = rdev;
3160 mw->qplib_mw.pd = &pd->qplib_pd;
3161
3162 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3163 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3164 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3165 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3166 if (rc) {
3167 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3168 goto fail;
3169 }
3170 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3171
3172 atomic_inc(&rdev->mw_count);
3173 return &mw->ib_mw;
3174
3175fail:
3176 kfree(mw);
3177 return ERR_PTR(rc);
3178}
3179
3180int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3181{
3182 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3183 struct bnxt_re_dev *rdev = mw->rdev;
3184 int rc;
3185
3186 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3187 if (rc) {
3188 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3189 return rc;
3190 }
3191
3192 kfree(mw);
3193 atomic_dec(&rdev->mw_count);
3194 return rc;
3195}
3196
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003197/* uverbs */
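/*
 * Register a user memory region: pin the pages with ib_umem_get(), build a
 * flat page list (PBL) of DMA addresses and register it with the firmware
 * via bnxt_qplib_reg_mr(). Hugetlb umems and page shifts other than
 * PAGE_SHIFT are rejected.
 */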
3198struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3199 u64 virt_addr, int mr_access_flags,
3200 struct ib_udata *udata)
3201{
3202 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3203 struct bnxt_re_dev *rdev = pd->rdev;
3204 struct bnxt_re_mr *mr;
3205 struct ib_umem *umem;
3206 u64 *pbl_tbl, *pbl_tbl_orig;
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003207 int i, umem_pgs, pages, rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003208 struct scatterlist *sg;
3209 int entry;
3210
Selvin Xavier58d4a672017-06-29 12:28:12 -07003211 if (length > BNXT_RE_MAX_MR_SIZE) {
3212 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
3213 length, BNXT_RE_MAX_MR_SIZE);
3214 return ERR_PTR(-ENOMEM);
3215 }
3216
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003217 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3218 if (!mr)
3219 return ERR_PTR(-ENOMEM);
3220
3221 mr->rdev = rdev;
3222 mr->qplib_mr.pd = &pd->qplib_pd;
3223 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3224 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3225
3226 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3227 mr_access_flags, 0);
3228 if (IS_ERR(umem)) {
3229 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3230 rc = -EFAULT;
3231 goto free_mr;
3232 }
3233 mr->ib_umem = umem;
3234
3235 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3236 if (rc) {
3237 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3238 goto release_umem;
3239 }
3240 /* The fixed portion of the rkey is the same as the lkey */
3241 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3242
3243 mr->qplib_mr.va = virt_addr;
3244 umem_pgs = ib_umem_page_count(umem);
3245 if (!umem_pgs) {
3246 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3247 rc = -EINVAL;
3248 goto free_mrw;
3249 }
3250 mr->qplib_mr.total_size = length;
3251
3252 pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
3253 if (!pbl_tbl) {
3254 rc = -EINVAL;
3255 goto free_mrw;
3256 }
3257 pbl_tbl_orig = pbl_tbl;
3258
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003259 if (umem->hugetlb) {
3260 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3261 rc = -EFAULT;
3262 goto fail;
3263 }
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003264
3265 if (umem->page_shift != PAGE_SHIFT) {
3266 dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003267 rc = -EFAULT;
3268 goto fail;
3269 }
3270 /* Map umem buf ptrs to the PBL */
3271 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003272 pages = sg_dma_len(sg) >> umem->page_shift;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003273 for (i = 0; i < pages; i++, pbl_tbl++)
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003274 *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003275 }
3276 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3277 umem_pgs, false);
3278 if (rc) {
3279 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3280 goto fail;
3281 }
3282
3283 kfree(pbl_tbl_orig);
3284
3285 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3286 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3287 atomic_inc(&rdev->mr_count);
3288
3289 return &mr->ib_mr;
3290fail:
3291 kfree(pbl_tbl_orig);
3292free_mrw:
3293 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3294release_umem:
3295 ib_umem_release(umem);
3296free_mr:
3297 kfree(mr);
3298 return ERR_PTR(rc);
3299}
3300
3301struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3302 struct ib_udata *udata)
3303{
3304 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3305 struct bnxt_re_uctx_resp resp;
3306 struct bnxt_re_ucontext *uctx;
3307 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3308 int rc;
3309
3310 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3311 ibdev->uverbs_abi_ver);
3312
3313 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
 3314		dev_dbg(rdev_to_dev(rdev), "ABI version mismatch; driver supports %d",
 3315			BNXT_RE_ABI_VERSION);
3316 return ERR_PTR(-EPERM);
3317 }
3318
3319 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3320 if (!uctx)
3321 return ERR_PTR(-ENOMEM);
3322
3323 uctx->rdev = rdev;
3324
3325 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3326 if (!uctx->shpg) {
3327 rc = -ENOMEM;
3328 goto fail;
3329 }
3330 spin_lock_init(&uctx->sh_lock);
3331
 3332	resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3333 resp.max_qp = rdev->qplib_ctx.qpc_count;
3334 resp.pg_size = PAGE_SIZE;
3335 resp.cqe_sz = sizeof(struct cq_base);
3336 resp.max_cqd = dev_attr->max_cq_wqes;
3337 resp.rsvd = 0;
3338
3339 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3340 if (rc) {
3341 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3342 rc = -EFAULT;
3343 goto cfail;
3344 }
3345
3346 return &uctx->ib_uctx;
3347cfail:
3348 free_page((unsigned long)uctx->shpg);
3349 uctx->shpg = NULL;
3350fail:
3351 kfree(uctx);
3352 return ERR_PTR(rc);
3353}
3354
3355int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3356{
3357 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3358 struct bnxt_re_ucontext,
3359 ib_uctx);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003360
3361 struct bnxt_re_dev *rdev = uctx->rdev;
3362 int rc = 0;
3363
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003364 if (uctx->shpg)
3365 free_page((unsigned long)uctx->shpg);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003366
3367 if (uctx->dpi.dbr) {
 3368		/* Free the DPI (doorbell page) allocated for this context,
 3369		 * if any, and mark the context dpi as NULL.
 3370		 */
3371 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3372 &rdev->qplib_res.dpi_tbl,
3373 &uctx->dpi);
3374 if (rc)
 3375			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3376 /* Don't fail, continue*/
3377 uctx->dpi.dbr = NULL;
3378 }
3379
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003380 kfree(uctx);
3381 return 0;
3382}
3383
3384/* Helper function to mmap the virtual memory from user app */
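/*
 * A non-zero vm_pgoff is treated as the PFN of a doorbell (DPI) page and
 * is mapped non-cached; vm_pgoff == 0 maps the per-context shared page.
 */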
3385int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3386{
3387 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3388 struct bnxt_re_ucontext,
3389 ib_uctx);
3390 struct bnxt_re_dev *rdev = uctx->rdev;
3391 u64 pfn;
3392
3393 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3394 return -EINVAL;
3395
3396 if (vma->vm_pgoff) {
3397 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3398 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3399 PAGE_SIZE, vma->vm_page_prot)) {
3400 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3401 return -EAGAIN;
3402 }
3403 } else {
3404 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3405 if (remap_pfn_range(vma, vma->vm_start,
3406 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3407 dev_err(rdev_to_dev(rdev),
3408 "Failed to map shared page");
3409 return -EAGAIN;
3410 }
3411 }
3412
3413 return 0;
3414}