Selvin Xavier1ac5a402017-02-10 03:19:33 -08001/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39#include <linux/interrupt.h>
40#include <linux/types.h>
41#include <linux/pci.h>
42#include <linux/netdevice.h>
43#include <linux/if_ether.h>
44
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_user_verbs.h>
47#include <rdma/ib_umem.h>
48#include <rdma/ib_addr.h>
49#include <rdma/ib_mad.h>
50#include <rdma/ib_cache.h>
51
52#include "bnxt_ulp.h"
53
54#include "roce_hsi.h"
55#include "qplib_res.h"
56#include "qplib_sp.h"
57#include "qplib_fp.h"
58#include "qplib_rcfw.h"
59
60#include "bnxt_re.h"
61#include "ib_verbs.h"
62#include <rdma/bnxt_re-abi.h>
63
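/* The two helpers below translate access flags between the IB verbs
 * encoding and the bnxt_qplib (HW) encoding used by the qplib layer.
 */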
Eddie Wai9152e0b2017-06-14 03:26:23 -070064static int __from_ib_access_flags(int iflags)
65{
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
83};
84
85static enum ib_access_flags __to_ib_access_flags(int qflags)
86{
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
104};
105
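/* Copy an ib_sge array into the qplib SGE format used by the HW queues and
 * return the total length of the scatter/gather list in bytes.
 */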
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800106static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 struct bnxt_qplib_sge *sg_list, int num)
108{
109 int i, total = 0;
110
111 for (i = 0; i < num; i++) {
112 sg_list[i].addr = ib_sg_list[i].addr;
113 sg_list[i].lkey = ib_sg_list[i].lkey;
114 sg_list[i].size = ib_sg_list[i].length;
115 total += sg_list[i].size;
116 }
117 return total;
118}
119
120/* Device */
121struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122{
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
125
126 rcu_read_lock();
127 if (rdev)
128 netdev = rdev->netdev;
129 if (netdev)
130 dev_hold(netdev);
131
132 rcu_read_unlock();
133 return netdev;
134}
135
136int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
139{
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142
143 memset(ib_attr, 0, sizeof(*ib_attr));
144
145 ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
146 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
147 (u8 *)&ib_attr->sys_image_guid);
Selvin Xavier58d4a672017-06-29 12:28:12 -0700148 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
149 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800150
151 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
152 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
153 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
154 ib_attr->max_qp = dev_attr->max_qp;
155 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
156 ib_attr->device_cap_flags =
157 IB_DEVICE_CURR_QP_STATE_MOD
158 | IB_DEVICE_RC_RNR_NAK_GEN
159 | IB_DEVICE_SHUTDOWN_PORT
160 | IB_DEVICE_SYS_IMAGE_GUID
161 | IB_DEVICE_LOCAL_DMA_LKEY
162 | IB_DEVICE_RESIZE_MAX_WR
163 | IB_DEVICE_PORT_ACTIVE_EVENT
164 | IB_DEVICE_N_NOTIFY_CQ
165 | IB_DEVICE_MEM_WINDOW
166 | IB_DEVICE_MEM_WINDOW_TYPE_2B
167 | IB_DEVICE_MEM_MGT_EXTENSIONS;
168 ib_attr->max_sge = dev_attr->max_qp_sges;
169 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
170 ib_attr->max_cq = dev_attr->max_cq;
171 ib_attr->max_cqe = dev_attr->max_cq_wqes;
172 ib_attr->max_mr = dev_attr->max_mr;
173 ib_attr->max_pd = dev_attr->max_pd;
174 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
Eddie Waia25d1122017-06-29 12:28:13 -0700175 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
Devesh Sharma254cd252017-06-29 12:28:16 -0700176 if (dev_attr->is_atomic) {
177 ib_attr->atomic_cap = IB_ATOMIC_HCA;
178 ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
179 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800180
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
184 ib_attr->max_ee = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
193
Selvin Xavier86816a02017-05-22 03:15:44 -0700194 ib_attr->max_fmr = 0;
195 ib_attr->max_map_per_fmr = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800196
197 ib_attr->max_srq = dev_attr->max_srq;
198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
200
201 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
202
203 ib_attr->max_pkeys = 1;
Selvin Xavier601577b2017-06-29 12:28:19 -0700204 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800205 return 0;
206}
207
208int bnxt_re_modify_device(struct ib_device *ibdev,
209 int device_modify_mask,
210 struct ib_device_modify *device_modify)
211{
212 switch (device_modify_mask) {
213 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
 214		/* Modifying the GUID requires modifying the GID table */
 215		/* The GUID should be made READ-ONLY */
216 break;
217 case IB_DEVICE_MODIFY_NODE_DESC:
218 /* Node Desc should be made as READ-ONLY */
219 break;
220 default:
221 break;
222 }
223 return 0;
224}
225
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800226/* Port */
227int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
228 struct ib_port_attr *port_attr)
229{
230 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
231 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
232
233 memset(port_attr, 0, sizeof(*port_attr));
234
235 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
236 port_attr->state = IB_PORT_ACTIVE;
237 port_attr->phys_state = 5;
238 } else {
239 port_attr->state = IB_PORT_DOWN;
240 port_attr->phys_state = 3;
241 }
242 port_attr->max_mtu = IB_MTU_4096;
243 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
244 port_attr->gid_tbl_len = dev_attr->max_sgid;
245 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
246 IB_PORT_DEVICE_MGMT_SUP |
247 IB_PORT_VENDOR_CLASS_SUP |
248 IB_PORT_IP_BASED_GIDS;
249
250 /* Max MSG size set to 2G for now */
251 port_attr->max_msg_sz = 0x80000000;
252 port_attr->bad_pkey_cntr = 0;
253 port_attr->qkey_viol_cntr = 0;
254 port_attr->pkey_tbl_len = dev_attr->max_pkey;
255 port_attr->lid = 0;
256 port_attr->sm_lid = 0;
257 port_attr->lmc = 0;
258 port_attr->max_vl_num = 4;
259 port_attr->sm_sl = 0;
260 port_attr->subnet_timeout = 0;
261 port_attr->init_type_reply = 0;
 262	/* Call the underlying netdev's ethtool hooks to query the speed
 263	 * settings, acquiring rtnl_lock _only_ if the device is registered
 264	 * with the IB stack, to avoid a race in the NETDEV_UNREG path.
 265	 */
266 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
Yuval Shaiad4186192017-06-14 23:13:34 +0300267		if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
268 &port_attr->active_width))
269 return -EINVAL;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800270 return 0;
271}
272
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800273int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
274 struct ib_port_immutable *immutable)
275{
276 struct ib_port_attr port_attr;
277
278 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
279 return -EINVAL;
280
281 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
282 immutable->gid_tbl_len = port_attr.gid_tbl_len;
283 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
284 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
285 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
286 return 0;
287}
288
289int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
290 u16 index, u16 *pkey)
291{
292 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
293
294 /* Ignore port_num */
295
296 memset(pkey, 0, sizeof(*pkey));
297 return bnxt_qplib_get_pkey(&rdev->qplib_res,
298 &rdev->qplib_res.pkey_tbl, index, pkey);
299}
300
301int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
302 int index, union ib_gid *gid)
303{
304 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
305 int rc = 0;
306
307 /* Ignore port_num */
308 memset(gid, 0, sizeof(*gid));
309 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
310 &rdev->qplib_res.sgid_tbl, index,
311 (struct bnxt_qplib_gid *)gid);
312 return rc;
313}
314
315int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
316 unsigned int index, void **context)
317{
318 int rc = 0;
319 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
320 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
321 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
322
323 /* Delete the entry from the hardware */
324 ctx = *context;
325 if (!ctx)
326 return -EINVAL;
327
328 if (sgid_tbl && sgid_tbl->active) {
329 if (ctx->idx >= sgid_tbl->max)
330 return -EINVAL;
331 ctx->refcnt--;
332 if (!ctx->refcnt) {
Selvin Xavier4a62c5e2017-06-29 12:28:11 -0700333 rc = bnxt_qplib_del_sgid(sgid_tbl,
334 &sgid_tbl->tbl[ctx->idx],
335 true);
336 if (rc) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800337 dev_err(rdev_to_dev(rdev),
338 "Failed to remove GID: %#x", rc);
Selvin Xavier4a62c5e2017-06-29 12:28:11 -0700339 } else {
340 ctx_tbl = sgid_tbl->ctx;
341 ctx_tbl[ctx->idx] = NULL;
342 kfree(ctx);
343 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800344 }
345 } else {
346 return -EINVAL;
347 }
348 return rc;
349}
350
351int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
352 unsigned int index, const union ib_gid *gid,
353 const struct ib_gid_attr *attr, void **context)
354{
355 int rc;
356 u32 tbl_idx = 0;
357 u16 vlan_id = 0xFFFF;
358 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
359 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
360 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
361
362 if ((attr->ndev) && is_vlan_dev(attr->ndev))
363 vlan_id = vlan_dev_vlan_id(attr->ndev);
364
365 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
366 rdev->qplib_res.netdev->dev_addr,
367 vlan_id, true, &tbl_idx);
368 if (rc == -EALREADY) {
369 ctx_tbl = sgid_tbl->ctx;
370 ctx_tbl[tbl_idx]->refcnt++;
371 *context = ctx_tbl[tbl_idx];
372 return 0;
373 }
374
375 if (rc < 0) {
376 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
377 return rc;
378 }
379
380 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
381 if (!ctx)
382 return -ENOMEM;
383 ctx_tbl = sgid_tbl->ctx;
384 ctx->idx = tbl_idx;
385 ctx->refcnt = 1;
386 ctx_tbl[tbl_idx] = ctx;
387
388 return rc;
389}
390
391enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
392 u8 port_num)
393{
394 return IB_LINK_LAYER_ETHERNET;
395}
396
Eddie Wai9152e0b2017-06-14 03:26:23 -0700397#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
398
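/* Fence infrastructure: each kernel PD carries a reserved MR/MW pair whose
 * type-1 bind WQE (built below with the UC_FENCE flag) can be posted on a
 * QP's send queue to make the HW serialize against earlier WQEs.
 */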
399static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
400{
401 struct bnxt_re_fence_data *fence = &pd->fence;
402 struct ib_mr *ib_mr = &fence->mr->ib_mr;
403 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
404
405 memset(wqe, 0, sizeof(*wqe));
406 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
407 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
408 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
409 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
410 wqe->bind.zero_based = false;
411 wqe->bind.parent_l_key = ib_mr->lkey;
412 wqe->bind.va = (u64)(unsigned long)fence->va;
413 wqe->bind.length = fence->size;
414 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
415 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
416
417 /* Save the initial rkey in fence structure for now;
418 * wqe->bind.r_key will be set at (re)bind time.
419 */
420 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
421}
422
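/* Post a copy of the PD's pre-built bind WQE on this QP's send queue,
 * rotate the rkey for the next bind, and ring the SQ doorbell.
 */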
423static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
424{
425 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
426 qplib_qp);
427 struct ib_pd *ib_pd = qp->ib_qp.pd;
428 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
429 struct bnxt_re_fence_data *fence = &pd->fence;
430 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
431 struct bnxt_qplib_swqe wqe;
432 int rc;
433
434 memcpy(&wqe, fence_wqe, sizeof(wqe));
435 wqe.bind.r_key = fence->bind_rkey;
436 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
437
438 dev_dbg(rdev_to_dev(qp->rdev),
439 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
440 wqe.bind.r_key, qp->qplib_qp.id, pd);
441 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
442 if (rc) {
443 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
444 return rc;
445 }
446 bnxt_qplib_post_send_db(&qp->qplib_qp);
447
448 return rc;
449}
450
451static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
452{
453 struct bnxt_re_fence_data *fence = &pd->fence;
454 struct bnxt_re_dev *rdev = pd->rdev;
455 struct device *dev = &rdev->en_dev->pdev->dev;
456 struct bnxt_re_mr *mr = fence->mr;
457
458 if (fence->mw) {
459 bnxt_re_dealloc_mw(fence->mw);
460 fence->mw = NULL;
461 }
462 if (mr) {
463 if (mr->ib_mr.rkey)
464 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
465 true);
466 if (mr->ib_mr.lkey)
467 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
468 kfree(mr);
469 fence->mr = NULL;
470 }
471 if (fence->dma_addr) {
472 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
473 DMA_BIDIRECTIONAL);
474 fence->dma_addr = 0;
475 }
476}
477
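/* Set up the per-PD fence resources: DMA-map the fence buffer, allocate and
 * register the fence MR, create the type-1 MW, and pre-build the bind WQE.
 * Called from bnxt_re_alloc_pd() for kernel (non-udata) PDs only.
 */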
478static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
479{
480 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
481 struct bnxt_re_fence_data *fence = &pd->fence;
482 struct bnxt_re_dev *rdev = pd->rdev;
483 struct device *dev = &rdev->en_dev->pdev->dev;
484 struct bnxt_re_mr *mr = NULL;
485 dma_addr_t dma_addr = 0;
486 struct ib_mw *mw;
487 u64 pbl_tbl;
488 int rc;
489
490 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
491 DMA_BIDIRECTIONAL);
492 rc = dma_mapping_error(dev, dma_addr);
493 if (rc) {
494 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
495 rc = -EIO;
496 fence->dma_addr = 0;
497 goto fail;
498 }
499 fence->dma_addr = dma_addr;
500
501 /* Allocate a MR */
502 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
503 if (!mr) {
504 rc = -ENOMEM;
505 goto fail;
506 }
507 fence->mr = mr;
508 mr->rdev = rdev;
509 mr->qplib_mr.pd = &pd->qplib_pd;
510 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
511 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
512 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
513 if (rc) {
514 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
515 goto fail;
516 }
517
518 /* Register MR */
519 mr->ib_mr.lkey = mr->qplib_mr.lkey;
520 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
521 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
522 pbl_tbl = dma_addr;
523 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
524 BNXT_RE_FENCE_PBL_SIZE, false);
525 if (rc) {
526 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
527 goto fail;
528 }
529 mr->ib_mr.rkey = mr->qplib_mr.rkey;
530
531 /* Create a fence MW only for kernel consumers */
532 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
Dan Carpenter653f0a72017-07-10 10:22:47 +0300533 if (IS_ERR(mw)) {
Eddie Wai9152e0b2017-06-14 03:26:23 -0700534 dev_err(rdev_to_dev(rdev),
535 "Failed to create fence-MW for PD: %p\n", pd);
Dan Carpenter653f0a72017-07-10 10:22:47 +0300536 rc = PTR_ERR(mw);
Eddie Wai9152e0b2017-06-14 03:26:23 -0700537 goto fail;
538 }
539 fence->mw = mw;
540
541 bnxt_re_create_fence_wqe(pd);
542 return 0;
543
544fail:
545 bnxt_re_destroy_fence_mr(pd);
546 return rc;
547}
548
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800549/* Protection Domains */
550int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
551{
552 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
553 struct bnxt_re_dev *rdev = pd->rdev;
554 int rc;
555
Eddie Wai9152e0b2017-06-14 03:26:23 -0700556 bnxt_re_destroy_fence_mr(pd);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800557
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700558 if (pd->qplib_pd.id) {
559 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
560 &rdev->qplib_res.pd_tbl,
561 &pd->qplib_pd);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800562 if (rc)
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700563 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800564 }
565
566 kfree(pd);
567 return 0;
568}
569
570struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
571 struct ib_ucontext *ucontext,
572 struct ib_udata *udata)
573{
574 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
575 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
576 struct bnxt_re_ucontext,
577 ib_uctx);
578 struct bnxt_re_pd *pd;
579 int rc;
580
581 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
582 if (!pd)
583 return ERR_PTR(-ENOMEM);
584
585 pd->rdev = rdev;
586 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
587 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
588 rc = -ENOMEM;
589 goto fail;
590 }
591
592 if (udata) {
593 struct bnxt_re_pd_resp resp;
594
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700595 if (!ucntx->dpi.dbr) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800596			/* Allocate the DPI in alloc_pd to avoid failures in
 597			 * ibv_devinfo and related applications when DPIs
 598			 * are depleted.
599 */
600 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700601 &ucntx->dpi, ucntx)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800602 rc = -ENOMEM;
603 goto dbfail;
604 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800605 }
606
607 resp.pdid = pd->qplib_pd.id;
608 /* Still allow mapping this DBR to the new user PD. */
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700609 resp.dpi = ucntx->dpi.dpi;
610 resp.dbr = (u64)ucntx->dpi.umdbr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800611
612 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
613 if (rc) {
614 dev_err(rdev_to_dev(rdev),
615 "Failed to copy user response\n");
616 goto dbfail;
617 }
618 }
619
Eddie Wai9152e0b2017-06-14 03:26:23 -0700620 if (!udata)
621 if (bnxt_re_create_fence_mr(pd))
622 dev_warn(rdev_to_dev(rdev),
623 "Failed to create Fence-MR\n");
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800624 return &pd->ib_pd;
625dbfail:
626 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
627 &pd->qplib_pd);
628fail:
629 kfree(pd);
630 return ERR_PTR(rc);
631}
632
633/* Address Handles */
634int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
635{
636 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
637 struct bnxt_re_dev *rdev = ah->rdev;
638 int rc;
639
640 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
641 if (rc) {
642 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
643 return rc;
644 }
645 kfree(ah);
646 return 0;
647}
648
649struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400650 struct rdma_ah_attr *ah_attr,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800651 struct ib_udata *udata)
652{
653 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
654 struct bnxt_re_dev *rdev = pd->rdev;
655 struct bnxt_re_ah *ah;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400656 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800657 int rc;
658 u16 vlan_tag;
659 u8 nw_type;
660
661 struct ib_gid_attr sgid_attr;
662
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400663 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800664 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
665 return ERR_PTR(-EINVAL);
666 }
667 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
668 if (!ah)
669 return ERR_PTR(-ENOMEM);
670
671 ah->rdev = rdev;
672 ah->qplib_ah.pd = &pd->qplib_pd;
673
674 /* Supply the configuration for the HW */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400675 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800676 sizeof(union ib_gid));
677 /*
 678	 * If RoCE V2 is enabled, the stack will have two entries for
 679	 * each GID entry. Avoid this duplicate entry in HW by dividing
 680	 * the GID index by 2 for RoCE V2.
681 */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400682 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
683 ah->qplib_ah.host_sgid_index = grh->sgid_index;
684 ah->qplib_ah.traffic_class = grh->traffic_class;
685 ah->qplib_ah.flow_label = grh->flow_label;
686 ah->qplib_ah.hop_limit = grh->hop_limit;
687 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800688 if (ib_pd->uobject &&
689 !rdma_is_multicast_addr((struct in6_addr *)
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400690 grh->dgid.raw) &&
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800691 !rdma_link_local_addr((struct in6_addr *)
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400692 grh->dgid.raw)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800693 union ib_gid sgid;
694
695 rc = ib_get_cached_gid(&rdev->ibdev, 1,
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400696 grh->sgid_index, &sgid,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800697 &sgid_attr);
698 if (rc) {
699 dev_err(rdev_to_dev(rdev),
700 "Failed to query gid at index %d",
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400701 grh->sgid_index);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800702 goto fail;
703 }
704 if (sgid_attr.ndev) {
705 if (is_vlan_dev(sgid_attr.ndev))
706 vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
707 dev_put(sgid_attr.ndev);
708 }
709 /* Get network header type for this GID */
710 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
711 switch (nw_type) {
712 case RDMA_NETWORK_IPV4:
713 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
714 break;
715 case RDMA_NETWORK_IPV6:
716 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
717 break;
718 default:
719 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
720 break;
721 }
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400722 rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400723 ah_attr->roce.dmac, &vlan_tag,
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800724 &sgid_attr.ndev->ifindex,
725 NULL);
726 if (rc) {
727 dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
728 goto fail;
729 }
730 }
731
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400732 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800733 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
734 if (rc) {
735 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
736 goto fail;
737 }
738
739 /* Write AVID to shared page. */
740 if (ib_pd->uobject) {
741 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
742 struct bnxt_re_ucontext *uctx;
743 unsigned long flag;
744 u32 *wrptr;
745
746 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
747 spin_lock_irqsave(&uctx->sh_lock, flag);
748 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
749 *wrptr = ah->qplib_ah.id;
750 wmb(); /* make sure cache is updated. */
751 spin_unlock_irqrestore(&uctx->sh_lock, flag);
752 }
753
754 return &ah->ib_ah;
755
756fail:
757 kfree(ah);
758 return ERR_PTR(rc);
759}
760
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400761int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800762{
763 return 0;
764}
765
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400766int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800767{
768 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
769
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400770 ah_attr->type = ib_ah->type;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400771 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400772 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400773 rdma_ah_set_grh(ah_attr, NULL, 0,
774 ah->qplib_ah.host_sgid_index,
775 0, ah->qplib_ah.traffic_class);
776 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
777 rdma_ah_set_port_num(ah_attr, 1);
778 rdma_ah_set_static_rate(ah_attr, 0);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800779 return 0;
780}
781
782/* Queue Pairs */
783int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
784{
785 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
786 struct bnxt_re_dev *rdev = qp->rdev;
787 int rc;
788
789 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
790 if (rc) {
791 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
792 return rc;
793 }
794 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
795 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
796 &rdev->sqp_ah->qplib_ah);
797 if (rc) {
798 dev_err(rdev_to_dev(rdev),
799 "Failed to destroy HW AH for shadow QP");
800 return rc;
801 }
802
803 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
804 &rdev->qp1_sqp->qplib_qp);
805 if (rc) {
806 dev_err(rdev_to_dev(rdev),
807 "Failed to destroy Shadow QP");
808 return rc;
809 }
810 mutex_lock(&rdev->qp_lock);
811 list_del(&rdev->qp1_sqp->list);
812 atomic_dec(&rdev->qp_count);
813 mutex_unlock(&rdev->qp_lock);
814
815 kfree(rdev->sqp_ah);
816 kfree(rdev->qp1_sqp);
817 }
818
Doug Ledford374cb862017-04-25 14:00:59 -0400819 if (!IS_ERR_OR_NULL(qp->rumem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800820 ib_umem_release(qp->rumem);
Doug Ledford374cb862017-04-25 14:00:59 -0400821 if (!IS_ERR_OR_NULL(qp->sumem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800822 ib_umem_release(qp->sumem);
823
824 mutex_lock(&rdev->qp_lock);
825 list_del(&qp->list);
826 atomic_dec(&rdev->qp_count);
827 mutex_unlock(&rdev->qp_lock);
828 kfree(qp);
829 return 0;
830}
831
832static u8 __from_ib_qp_type(enum ib_qp_type type)
833{
834 switch (type) {
835 case IB_QPT_GSI:
836 return CMDQ_CREATE_QP1_TYPE_GSI;
837 case IB_QPT_RC:
838 return CMDQ_CREATE_QP_TYPE_RC;
839 case IB_QPT_UD:
840 return CMDQ_CREATE_QP_TYPE_UD;
841 default:
842 return IB_QPT_MAX;
843 }
844}
845
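/* For user-space QPs: pin the SQ buffer supplied via the ABI request (sized
 * to also cover the PSN search area for RC QPs), pin the RQ buffer when no
 * SRQ is used, and point the QP at the user context's DPI.
 */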
846static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
847 struct bnxt_re_qp *qp, struct ib_udata *udata)
848{
849 struct bnxt_re_qp_req ureq;
850 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
851 struct ib_umem *umem;
852 int bytes = 0;
853 struct ib_ucontext *context = pd->ib_pd.uobject->context;
854 struct bnxt_re_ucontext *cntx = container_of(context,
855 struct bnxt_re_ucontext,
856 ib_uctx);
857 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
858 return -EFAULT;
859
860 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
861 /* Consider mapping PSN search memory only for RC QPs. */
862 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
863 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
864 bytes = PAGE_ALIGN(bytes);
865 umem = ib_umem_get(context, ureq.qpsva, bytes,
866 IB_ACCESS_LOCAL_WRITE, 1);
867 if (IS_ERR(umem))
868 return PTR_ERR(umem);
869
870 qp->sumem = umem;
871 qplib_qp->sq.sglist = umem->sg_head.sgl;
872 qplib_qp->sq.nmap = umem->nmap;
873 qplib_qp->qp_handle = ureq.qp_handle;
874
875 if (!qp->qplib_qp.srq) {
876 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
877 bytes = PAGE_ALIGN(bytes);
878 umem = ib_umem_get(context, ureq.qprva, bytes,
879 IB_ACCESS_LOCAL_WRITE, 1);
880 if (IS_ERR(umem))
881 goto rqfail;
882 qp->rumem = umem;
883 qplib_qp->rq.sglist = umem->sg_head.sgl;
884 qplib_qp->rq.nmap = umem->nmap;
885 }
886
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700887 qplib_qp->dpi = &cntx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800888 return 0;
889rqfail:
890 ib_umem_release(qp->sumem);
891 qp->sumem = NULL;
892 qplib_qp->sq.sglist = NULL;
893 qplib_qp->sq.nmap = 0;
894
895 return PTR_ERR(umem);
896}
897
898static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
899 (struct bnxt_re_pd *pd,
900 struct bnxt_qplib_res *qp1_res,
901 struct bnxt_qplib_qp *qp1_qp)
902{
903 struct bnxt_re_dev *rdev = pd->rdev;
904 struct bnxt_re_ah *ah;
905 union ib_gid sgid;
906 int rc;
907
908 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
909 if (!ah)
910 return NULL;
911
912 memset(ah, 0, sizeof(*ah));
913 ah->rdev = rdev;
914 ah->qplib_ah.pd = &pd->qplib_pd;
915
916 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
917 if (rc)
918 goto fail;
919
 920	/* Supply the dgid with the same data as the sgid */
921 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
922 sizeof(union ib_gid));
923 ah->qplib_ah.sgid_index = 0;
924
925 ah->qplib_ah.traffic_class = 0;
926 ah->qplib_ah.flow_label = 0;
927 ah->qplib_ah.hop_limit = 1;
928 ah->qplib_ah.sl = 0;
929 /* Have DMAC same as SMAC */
930 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
931
932 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
933 if (rc) {
934 dev_err(rdev_to_dev(rdev),
935 "Failed to allocate HW AH for Shadow QP");
936 goto fail;
937 }
938
939 return ah;
940
941fail:
942 kfree(ah);
943 return NULL;
944}
945
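/* QP1 (GSI) support uses an internal UD "shadow" QP that shares QP1's CQs
 * and is sized from QP1's receive queue; it is created below with the
 * privileged DPI and is used to handle QP1 traffic (see bnxt_re_create_qp()).
 */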
946static struct bnxt_re_qp *bnxt_re_create_shadow_qp
947 (struct bnxt_re_pd *pd,
948 struct bnxt_qplib_res *qp1_res,
949 struct bnxt_qplib_qp *qp1_qp)
950{
951 struct bnxt_re_dev *rdev = pd->rdev;
952 struct bnxt_re_qp *qp;
953 int rc;
954
955 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
956 if (!qp)
957 return NULL;
958
959 memset(qp, 0, sizeof(*qp));
960 qp->rdev = rdev;
961
962 /* Initialize the shadow QP structure from the QP1 values */
963 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
964
965 qp->qplib_qp.pd = &pd->qplib_pd;
966 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
967 qp->qplib_qp.type = IB_QPT_UD;
968
969 qp->qplib_qp.max_inline_data = 0;
970 qp->qplib_qp.sig_type = true;
971
972 /* Shadow QP SQ depth should be same as QP1 RQ depth */
973 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
974 qp->qplib_qp.sq.max_sge = 2;
Eddie Wai9152e0b2017-06-14 03:26:23 -0700975	/* Q full delta can be 1 since it is an internal QP */
976 qp->qplib_qp.sq.q_full_delta = 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800977
978 qp->qplib_qp.scq = qp1_qp->scq;
979 qp->qplib_qp.rcq = qp1_qp->rcq;
980
981 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
982 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
Eddie Wai9152e0b2017-06-14 03:26:23 -0700983	/* Q full delta can be 1 since it is an internal QP */
984 qp->qplib_qp.rq.q_full_delta = 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800985
986 qp->qplib_qp.mtu = qp1_qp->mtu;
987
988 qp->qplib_qp.sq_hdr_buf_size = 0;
989 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
990 qp->qplib_qp.dpi = &rdev->dpi_privileged;
991
992 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
993 if (rc)
994 goto fail;
995
996 rdev->sqp_id = qp->qplib_qp.id;
997
998 spin_lock_init(&qp->sq_lock);
999 INIT_LIST_HEAD(&qp->list);
1000 mutex_lock(&rdev->qp_lock);
1001 list_add_tail(&qp->list, &rdev->qp_list);
1002 atomic_inc(&rdev->qp_count);
1003 mutex_unlock(&rdev->qp_lock);
1004 return qp;
1005fail:
1006 kfree(qp);
1007 return NULL;
1008}
1009
1010struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1011 struct ib_qp_init_attr *qp_init_attr,
1012 struct ib_udata *udata)
1013{
1014 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1015 struct bnxt_re_dev *rdev = pd->rdev;
1016 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1017 struct bnxt_re_qp *qp;
1018 struct bnxt_re_cq *cq;
1019 int rc, entries;
1020
1021 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1022 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1023 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1024 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1025 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1026 return ERR_PTR(-EINVAL);
1027
1028 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1029 if (!qp)
1030 return ERR_PTR(-ENOMEM);
1031
1032 qp->rdev = rdev;
1033 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1034 qp->qplib_qp.pd = &pd->qplib_pd;
1035 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1036 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1037 if (qp->qplib_qp.type == IB_QPT_MAX) {
1038 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1039 qp->qplib_qp.type);
1040 rc = -EINVAL;
1041 goto fail;
1042 }
1043 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1044 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1045 IB_SIGNAL_ALL_WR) ? true : false);
1046
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001047 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1048 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1049 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1050
1051 if (qp_init_attr->send_cq) {
1052 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1053 ib_cq);
1054 if (!cq) {
1055 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1056 rc = -EINVAL;
1057 goto fail;
1058 }
1059 qp->qplib_qp.scq = &cq->qplib_cq;
1060 }
1061
1062 if (qp_init_attr->recv_cq) {
1063 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1064 ib_cq);
1065 if (!cq) {
1066 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1067 rc = -EINVAL;
1068 goto fail;
1069 }
1070 qp->qplib_qp.rcq = &cq->qplib_cq;
1071 }
1072
1073 if (qp_init_attr->srq) {
1074 dev_err(rdev_to_dev(rdev), "SRQ not supported");
1075 rc = -ENOTSUPP;
1076 goto fail;
1077 } else {
1078 /* Allocate 1 more than what's provided so posting max doesn't
1079 * mean empty
1080 */
1081 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1082 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1083 dev_attr->max_qp_wqes + 1);
1084
Eddie Wai9152e0b2017-06-14 03:26:23 -07001085 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1086 qp_init_attr->cap.max_recv_wr;
1087
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001088 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1089 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1090 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1091 }
1092
1093 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1094
1095 if (qp_init_attr->qp_type == IB_QPT_GSI) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07001096 /* Allocate 1 more than what's provided */
1097 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1098 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1099 dev_attr->max_qp_wqes + 1);
1100 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1101 qp_init_attr->cap.max_send_wr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001102 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1103 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1104 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1105 qp->qplib_qp.sq.max_sge++;
1106 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1107 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1108
1109 qp->qplib_qp.rq_hdr_buf_size =
1110 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1111
1112 qp->qplib_qp.sq_hdr_buf_size =
1113 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1114 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1115 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1116 if (rc) {
1117 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1118 goto fail;
1119 }
1120 /* Create a shadow QP to handle the QP1 traffic */
1121 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1122 &qp->qplib_qp);
1123 if (!rdev->qp1_sqp) {
1124 rc = -EINVAL;
1125 dev_err(rdev_to_dev(rdev),
1126 "Failed to create Shadow QP for QP1");
1127 goto qp_destroy;
1128 }
1129 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1130 &qp->qplib_qp);
1131 if (!rdev->sqp_ah) {
1132 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1133 &rdev->qp1_sqp->qplib_qp);
1134 rc = -EINVAL;
1135 dev_err(rdev_to_dev(rdev),
1136 "Failed to create AH entry for ShadowQP");
1137 goto qp_destroy;
1138 }
1139
1140 } else {
Eddie Wai9152e0b2017-06-14 03:26:23 -07001141 /* Allocate 128 + 1 more than what's provided */
1142 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1143 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1144 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1145 dev_attr->max_qp_wqes +
1146 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1147 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1148
1149 /*
 1150		 * Reserve one slot for the phantom WQE. The application can
 1151		 * post one extra entry in this case; allow it to avoid an
 1152		 * unexpected queue-full condition.
1153 */
1154
1155 qp->qplib_qp.sq.q_full_delta -= 1;
1156
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001157 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1158 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1159 if (udata) {
1160 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1161 if (rc)
1162 goto fail;
1163 } else {
1164 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1165 }
1166
1167 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1168 if (rc) {
1169 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1170 goto fail;
1171 }
1172 }
1173
1174 qp->ib_qp.qp_num = qp->qplib_qp.id;
1175 spin_lock_init(&qp->sq_lock);
Devesh Sharma018cf592017-05-22 03:15:40 -07001176 spin_lock_init(&qp->rq_lock);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001177
1178 if (udata) {
1179 struct bnxt_re_qp_resp resp;
1180
1181 resp.qpid = qp->ib_qp.qp_num;
1182 resp.rsvd = 0;
1183 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1184 if (rc) {
1185 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1186 goto qp_destroy;
1187 }
1188 }
1189 INIT_LIST_HEAD(&qp->list);
1190 mutex_lock(&rdev->qp_lock);
1191 list_add_tail(&qp->list, &rdev->qp_list);
1192 atomic_inc(&rdev->qp_count);
1193 mutex_unlock(&rdev->qp_lock);
1194
1195 return &qp->ib_qp;
1196qp_destroy:
1197 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1198fail:
1199 kfree(qp);
1200 return ERR_PTR(rc);
1201}
1202
1203static u8 __from_ib_qp_state(enum ib_qp_state state)
1204{
1205 switch (state) {
1206 case IB_QPS_RESET:
1207 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1208 case IB_QPS_INIT:
1209 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1210 case IB_QPS_RTR:
1211 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1212 case IB_QPS_RTS:
1213 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1214 case IB_QPS_SQD:
1215 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1216 case IB_QPS_SQE:
1217 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1218 case IB_QPS_ERR:
1219 default:
1220 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1221 }
1222}
1223
1224static enum ib_qp_state __to_ib_qp_state(u8 state)
1225{
1226 switch (state) {
1227 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1228 return IB_QPS_RESET;
1229 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1230 return IB_QPS_INIT;
1231 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1232 return IB_QPS_RTR;
1233 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1234 return IB_QPS_RTS;
1235 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1236 return IB_QPS_SQD;
1237 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1238 return IB_QPS_SQE;
1239 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1240 default:
1241 return IB_QPS_ERR;
1242 }
1243}
1244
1245static u32 __from_ib_mtu(enum ib_mtu mtu)
1246{
1247 switch (mtu) {
1248 case IB_MTU_256:
1249 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1250 case IB_MTU_512:
1251 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1252 case IB_MTU_1024:
1253 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1254 case IB_MTU_2048:
1255 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1256 case IB_MTU_4096:
1257 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1258 default:
1259 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1260 }
1261}
1262
1263static enum ib_mtu __to_ib_mtu(u32 mtu)
1264{
1265 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1266 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1267 return IB_MTU_256;
1268 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1269 return IB_MTU_512;
1270 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1271 return IB_MTU_1024;
1272 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1273 return IB_MTU_2048;
1274 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1275 return IB_MTU_4096;
1276 default:
1277 return IB_MTU_2048;
1278 }
1279}
1280
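/* Propagate the relevant QP1 attribute changes (state, pkey index, qkey and
 * SQ PSN) to the shadow QP so the two stay in sync.
 */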
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001281static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1282 struct bnxt_re_qp *qp1_qp,
1283 int qp_attr_mask)
1284{
1285 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1286 int rc = 0;
1287
1288 if (qp_attr_mask & IB_QP_STATE) {
1289 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1290 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1291 }
1292 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1293 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1294 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1295 }
1296
1297 if (qp_attr_mask & IB_QP_QKEY) {
1298 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1299 /* Using a Random QKEY */
1300 qp->qplib_qp.qkey = 0x81818181;
1301 }
1302 if (qp_attr_mask & IB_QP_SQ_PSN) {
1303 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1304 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1305 }
1306
1307 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1308 if (rc)
1309 dev_err(rdev_to_dev(rdev),
1310 "Failed to modify Shadow QP for QP1");
1311 return rc;
1312}
1313
1314int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1315 int qp_attr_mask, struct ib_udata *udata)
1316{
1317 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1318 struct bnxt_re_dev *rdev = qp->rdev;
1319 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1320 enum ib_qp_state curr_qp_state, new_qp_state;
1321 int rc, entries;
1322 int status;
1323 union ib_gid sgid;
1324 struct ib_gid_attr sgid_attr;
1325 u8 nw_type;
1326
1327 qp->qplib_qp.modify_flags = 0;
1328 if (qp_attr_mask & IB_QP_STATE) {
1329 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1330 new_qp_state = qp_attr->qp_state;
1331 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1332 ib_qp->qp_type, qp_attr_mask,
1333 IB_LINK_LAYER_ETHERNET)) {
1334 dev_err(rdev_to_dev(rdev),
1335 "Invalid attribute mask: %#x specified ",
1336 qp_attr_mask);
1337 dev_err(rdev_to_dev(rdev),
1338 "for qpn: %#x type: %#x",
1339 ib_qp->qp_num, ib_qp->qp_type);
1340 dev_err(rdev_to_dev(rdev),
1341 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1342 curr_qp_state, new_qp_state);
1343 return -EINVAL;
1344 }
1345 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1346 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1347 }
1348 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1349 qp->qplib_qp.modify_flags |=
1350 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1351 qp->qplib_qp.en_sqd_async_notify = true;
1352 }
1353 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1354 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1355 qp->qplib_qp.access =
1356 __from_ib_access_flags(qp_attr->qp_access_flags);
1357 /* LOCAL_WRITE access must be set to allow RC receive */
1358 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1359 }
1360 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1361 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1362 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1363 }
1364 if (qp_attr_mask & IB_QP_QKEY) {
1365 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1366 qp->qplib_qp.qkey = qp_attr->qkey;
1367 }
1368 if (qp_attr_mask & IB_QP_AV) {
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001369 const struct ib_global_route *grh =
1370 rdma_ah_read_grh(&qp_attr->ah_attr);
1371
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001372 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1373 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1374 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1375 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1376 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1377 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1378 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001379 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001380 sizeof(qp->qplib_qp.ah.dgid.data));
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001381 qp->qplib_qp.ah.flow_label = grh->flow_label;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001382		/* If RoCE V2 is enabled, the stack will have two entries for
 1383		 * each GID entry. Avoid this duplicate entry in HW by dividing
 1384		 * the GID index by 2 for RoCE V2.
1385 */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001386 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1387 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1388 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1389 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1390 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001391 ether_addr_copy(qp->qplib_qp.ah.dmac,
1392 qp_attr->ah_attr.roce.dmac);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001393
1394 status = ib_get_cached_gid(&rdev->ibdev, 1,
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001395 grh->sgid_index,
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001396 &sgid, &sgid_attr);
1397 if (!status && sgid_attr.ndev) {
1398 memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1399 ETH_ALEN);
1400 dev_put(sgid_attr.ndev);
1401 nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1402 &sgid);
1403 switch (nw_type) {
1404 case RDMA_NETWORK_IPV4:
1405 qp->qplib_qp.nw_type =
1406 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1407 break;
1408 case RDMA_NETWORK_IPV6:
1409 qp->qplib_qp.nw_type =
1410 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1411 break;
1412 default:
1413 qp->qplib_qp.nw_type =
1414 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1415 break;
1416 }
1417 }
1418 }
1419
1420 if (qp_attr_mask & IB_QP_PATH_MTU) {
1421 qp->qplib_qp.modify_flags |=
1422 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1423 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1424 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1425 qp->qplib_qp.modify_flags |=
1426 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1427 qp->qplib_qp.path_mtu =
1428 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1429 }
1430
1431 if (qp_attr_mask & IB_QP_TIMEOUT) {
1432 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1433 qp->qplib_qp.timeout = qp_attr->timeout;
1434 }
1435 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1436 qp->qplib_qp.modify_flags |=
1437 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1438 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1439 }
1440 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1441 qp->qplib_qp.modify_flags |=
1442 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1443 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1444 }
1445 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1446 qp->qplib_qp.modify_flags |=
1447 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1448 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1449 }
1450 if (qp_attr_mask & IB_QP_RQ_PSN) {
1451 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1452 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1453 }
1454 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1455 qp->qplib_qp.modify_flags |=
1456 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
Eddie Waia25d1122017-06-29 12:28:13 -07001457 /* Cap the max_rd_atomic to device max */
1458 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1459 dev_attr->max_qp_rd_atom);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001460 }
1461 if (qp_attr_mask & IB_QP_SQ_PSN) {
1462 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1463 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1464 }
1465 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
Eddie Waia25d1122017-06-29 12:28:13 -07001466 if (qp_attr->max_dest_rd_atomic >
1467 dev_attr->max_qp_init_rd_atom) {
1468 dev_err(rdev_to_dev(rdev),
1469 "max_dest_rd_atomic requested%d is > dev_max%d",
1470 qp_attr->max_dest_rd_atomic,
1471 dev_attr->max_qp_init_rd_atom);
1472 return -EINVAL;
1473 }
1474
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001475 qp->qplib_qp.modify_flags |=
1476 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1477 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1478 }
1479 if (qp_attr_mask & IB_QP_CAP) {
1480 qp->qplib_qp.modify_flags |=
1481 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1482 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1483 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1484 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1485 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1486 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1487 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1488 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1489 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1490 (qp_attr->cap.max_inline_data >=
1491 dev_attr->max_inline_data)) {
1492 dev_err(rdev_to_dev(rdev),
1493 "Create QP failed - max exceeded");
1494 return -EINVAL;
1495 }
1496 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1497 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1498 dev_attr->max_qp_wqes + 1);
Eddie Wai9152e0b2017-06-14 03:26:23 -07001499 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1500 qp_attr->cap.max_send_wr;
1501 /*
 1502		 * Reserve one slot for the phantom WQE. The application can
 1503		 * post one extra entry in this case; allow it to avoid an
 1504		 * unexpected queue-full condition.
1505 */
1506 qp->qplib_qp.sq.q_full_delta -= 1;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001507 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1508 if (qp->qplib_qp.rq.max_wqe) {
1509 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1510 qp->qplib_qp.rq.max_wqe =
1511 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
Eddie Wai9152e0b2017-06-14 03:26:23 -07001512 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1513 qp_attr->cap.max_recv_wr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001514 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1515 } else {
1516 /* SRQ was used prior, just ignore the RQ caps */
1517 }
1518 }
1519 if (qp_attr_mask & IB_QP_DEST_QPN) {
1520 qp->qplib_qp.modify_flags |=
1521 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1522 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1523 }
1524 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1525 if (rc) {
1526 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1527 return rc;
1528 }
1529 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1530 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1531 return rc;
1532}
1533
1534int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1535 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1536{
1537 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1538 struct bnxt_re_dev *rdev = qp->rdev;
1539 struct bnxt_qplib_qp qplib_qp;
1540 int rc;
1541
1542 memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
1543 qplib_qp.id = qp->qplib_qp.id;
1544 qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1545
1546 rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
1547 if (rc) {
1548 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1549 return rc;
1550 }
1551 qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
1552 qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
1553 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
1554 qp_attr->pkey_index = qplib_qp.pkey_index;
1555 qp_attr->qkey = qplib_qp.qkey;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001556 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001557 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
1558 qplib_qp.ah.host_sgid_index,
1559 qplib_qp.ah.hop_limit,
1560 qplib_qp.ah.traffic_class);
1561 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
1562 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001563 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001564 qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
1565 qp_attr->timeout = qplib_qp.timeout;
1566 qp_attr->retry_cnt = qplib_qp.retry_cnt;
1567 qp_attr->rnr_retry = qplib_qp.rnr_retry;
1568 qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
1569 qp_attr->rq_psn = qplib_qp.rq.psn;
1570 qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
1571 qp_attr->sq_psn = qplib_qp.sq.psn;
1572 qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
1573 qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
1574 IB_SIGNAL_REQ_WR;
1575 qp_attr->dest_qp_num = qplib_qp.dest_qpn;
1576
1577 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1578 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1579 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1580 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1581 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1582 qp_init_attr->cap = qp_attr->cap;
1583
1584 return 0;
1585}
1586
 1587/* Routine for sending QP1 packets for RoCE V1 and V2
1588 */
1589static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1590 struct ib_send_wr *wr,
1591 struct bnxt_qplib_swqe *wqe,
1592 int payload_size)
1593{
1594 struct ib_device *ibdev = &qp->rdev->ibdev;
1595 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1596 ib_ah);
1597 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1598 struct bnxt_qplib_sge sge;
1599 union ib_gid sgid;
1600 u8 nw_type;
1601 u16 ether_type;
1602 struct ib_gid_attr sgid_attr;
1603 union ib_gid dgid;
1604 bool is_eth = false;
1605 bool is_vlan = false;
1606 bool is_grh = false;
1607 bool is_udp = false;
1608 u8 ip_version = 0;
1609 u16 vlan_id = 0xFFFF;
1610 void *buf;
1611 int i, rc = 0, size;
1612
1613 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1614
1615 rc = ib_get_cached_gid(ibdev, 1,
1616 qplib_ah->host_sgid_index, &sgid,
1617 &sgid_attr);
1618 if (rc) {
1619 dev_err(rdev_to_dev(qp->rdev),
1620 "Failed to query gid at index %d",
1621 qplib_ah->host_sgid_index);
1622 return rc;
1623 }
1624 if (sgid_attr.ndev) {
1625 if (is_vlan_dev(sgid_attr.ndev))
1626 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1627 dev_put(sgid_attr.ndev);
1628 }
1629 /* Get network header type for this GID */
1630 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1631 switch (nw_type) {
1632 case RDMA_NETWORK_IPV4:
1633 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1634 break;
1635 case RDMA_NETWORK_IPV6:
1636 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1637 break;
1638 default:
1639 nw_type = BNXT_RE_ROCE_V1_PACKET;
1640 break;
1641 }
1642 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1643 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1644 if (is_udp) {
1645 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1646 ip_version = 4;
1647 ether_type = ETH_P_IP;
1648 } else {
1649 ip_version = 6;
1650 ether_type = ETH_P_IPV6;
1651 }
1652 is_grh = false;
1653 } else {
1654 ether_type = ETH_P_IBOE;
1655 is_grh = true;
1656 }
1657
1658 is_eth = true;
1659 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1660
1661 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1662 ip_version, is_udp, 0, &qp->qp1_hdr);
1663
1664 /* ETH */
1665 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1666 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1667
1668 /* For vlan, check the sgid for vlan existence */
1669
1670 if (!is_vlan) {
1671 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1672 } else {
1673 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1674 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1675 }
1676
1677 if (is_grh || (ip_version == 6)) {
1678 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1679 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1680 sizeof(sgid));
1681 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1682 }
1683
1684 if (ip_version == 4) {
1685 qp->qp1_hdr.ip4.tos = 0;
1686 qp->qp1_hdr.ip4.id = 0;
1687 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1688 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1689
1690 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1691 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1692 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1693 }
1694
1695 if (is_udp) {
1696 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1697 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1698 qp->qp1_hdr.udp.csum = 0;
1699 }
1700
1701 /* BTH */
1702 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1703 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1704 qp->qp1_hdr.immediate_present = 1;
1705 } else {
1706 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1707 }
1708 if (wr->send_flags & IB_SEND_SOLICITED)
1709 qp->qp1_hdr.bth.solicited_event = 1;
1710 /* pad_count */
1711 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1712
1713 /* P_key for QP1 is for all members */
1714 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1715 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1716 qp->qp1_hdr.bth.ack_req = 0;
1717 qp->send_psn++;
1718 qp->send_psn &= BTH_PSN_MASK;
1719 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1720 /* DETH */
1721	/* Use the privileged Q_Key for QP1 */
1722 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1723 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1724
1725 /* Pack the QP1 to the transmit buffer */
1726 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1727 if (buf) {
1728 size = ib_ud_header_pack(&qp->qp1_hdr, buf);
1729 for (i = wqe->num_sge; i; i--) {
1730 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1731 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1732 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1733 }
1734
1735 /*
1736 * Max Header buf size for IPV6 RoCE V2 is 86,
1737		 * which is the same as the QP1 SQ header buffer.
1738		 * Header buf size for IPV4 RoCE V2 can be 66:
1739		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
1740		 * Subtract 20 bytes from the QP1 SQ header buf size.
1741 */
1742 if (is_udp && ip_version == 4)
1743 sge.size -= 20;
1744 /*
1745 * Max Header buf size for RoCE V1 is 78.
1746 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1747 * Subtract 8 bytes from QP1 SQ header buf size
1748 */
1749 if (!is_udp)
1750 sge.size -= 8;
1751
1752		/* Subtract 4 bytes for non-VLAN packets */
1753 if (!is_vlan)
1754 sge.size -= 4;
1755
1756 wqe->sg_list[0].addr = sge.addr;
1757 wqe->sg_list[0].lkey = sge.lkey;
1758 wqe->sg_list[0].size = sge.size;
1759 wqe->num_sge++;
1760
1761 } else {
1762 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1763 rc = -ENOMEM;
1764 }
1765 return rc;
1766}
1767
1768/* The MAD layer provides only a receive SGE sized for the ib_grh plus
1769 * the MAD datagram; no Ethernet headers, Ethertype, BTH, DETH, nor
1770 * RoCE iCRC are included. The Cu+ solution must provide a buffer for
1771 * the entire receive packet (334 bytes, no VLAN) and then copy the
1772 * GRH and the MAD datagram out to the provided SGE.
1773 */
1774static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1775 struct ib_recv_wr *wr,
1776 struct bnxt_qplib_swqe *wqe,
1777 int payload_size)
1778{
1779 struct bnxt_qplib_sge ref, sge;
1780 u32 rq_prod_index;
1781 struct bnxt_re_sqp_entries *sqp_entry;
1782
1783 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1784
1785 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1786 return -ENOMEM;
1787
1788 /* Create 1 SGE to receive the entire
1789 * ethernet packet
1790 */
1791 /* Save the reference from ULP */
1792 ref.addr = wqe->sg_list[0].addr;
1793 ref.lkey = wqe->sg_list[0].lkey;
1794 ref.size = wqe->sg_list[0].size;
1795
1796 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1797
1798 /* SGE 1 */
1799 wqe->sg_list[0].addr = sge.addr;
1800 wqe->sg_list[0].lkey = sge.lkey;
1801 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1802 sge.size -= wqe->sg_list[0].size;
1803
1804 sqp_entry->sge.addr = ref.addr;
1805 sqp_entry->sge.lkey = ref.lkey;
1806 sqp_entry->sge.size = ref.size;
1807 /* Store the wrid for reporting completion */
1808 sqp_entry->wrid = wqe->wr_id;
1809 /* change the wqe->wrid to table index */
1810 wqe->wr_id = rq_prod_index;
1811 return 0;
1812}
1813
1814static int is_ud_qp(struct bnxt_re_qp *qp)
1815{
1816 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1817}
1818
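/* Translate a SEND-class ib_send_wr into a qplib software WQE; for UD
 * QPs this also records the destination QPN, Q_Key and AH index (AVID)
 * taken from the ud_wr.
 */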
1819static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1820 struct ib_send_wr *wr,
1821 struct bnxt_qplib_swqe *wqe)
1822{
1823 struct bnxt_re_ah *ah = NULL;
1824
1825 if (is_ud_qp(qp)) {
1826 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1827 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1828 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1829 wqe->send.avid = ah->qplib_ah.id;
1830 }
1831 switch (wr->opcode) {
1832 case IB_WR_SEND:
1833 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1834 break;
1835 case IB_WR_SEND_WITH_IMM:
1836 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1837 wqe->send.imm_data = wr->ex.imm_data;
1838 break;
1839 case IB_WR_SEND_WITH_INV:
1840 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1841 wqe->send.inv_key = wr->ex.invalidate_rkey;
1842 break;
1843 default:
1844 return -EINVAL;
1845 }
1846 if (wr->send_flags & IB_SEND_SIGNALED)
1847 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1848 if (wr->send_flags & IB_SEND_FENCE)
1849 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1850 if (wr->send_flags & IB_SEND_SOLICITED)
1851 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1852 if (wr->send_flags & IB_SEND_INLINE)
1853 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1854
1855 return 0;
1856}
1857
1858static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1859 struct bnxt_qplib_swqe *wqe)
1860{
1861 switch (wr->opcode) {
1862 case IB_WR_RDMA_WRITE:
1863 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1864 break;
1865 case IB_WR_RDMA_WRITE_WITH_IMM:
1866 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1867 wqe->rdma.imm_data = wr->ex.imm_data;
1868 break;
1869 case IB_WR_RDMA_READ:
1870 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1871 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1872 break;
1873 default:
1874 return -EINVAL;
1875 }
1876 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1877 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1878 if (wr->send_flags & IB_SEND_SIGNALED)
1879 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1880 if (wr->send_flags & IB_SEND_FENCE)
1881 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1882 if (wr->send_flags & IB_SEND_SOLICITED)
1883 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1884 if (wr->send_flags & IB_SEND_INLINE)
1885 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1886
1887 return 0;
1888}
1889
1890static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1891 struct bnxt_qplib_swqe *wqe)
1892{
1893 switch (wr->opcode) {
1894 case IB_WR_ATOMIC_CMP_AND_SWP:
1895 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
1896 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1897 break;
1898 case IB_WR_ATOMIC_FETCH_AND_ADD:
1899 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1900 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1901 break;
1902 default:
1903 return -EINVAL;
1904 }
1905 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1906 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1907 if (wr->send_flags & IB_SEND_SIGNALED)
1908 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1909 if (wr->send_flags & IB_SEND_FENCE)
1910 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1911 if (wr->send_flags & IB_SEND_SOLICITED)
1912 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1913 return 0;
1914}
1915
1916static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1917 struct bnxt_qplib_swqe *wqe)
1918{
1919 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1920 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1921
1922 if (wr->send_flags & IB_SEND_SIGNALED)
1923 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1924 if (wr->send_flags & IB_SEND_FENCE)
1925 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1926 if (wr->send_flags & IB_SEND_SOLICITED)
1927 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1928
1929 return 0;
1930}
1931
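/* Build a fast-register (REG_MR) WQE from an ib_reg_wr, pointing the
 * HW at the MR's page list and translating the IB access flags into
 * SQ_FR_PMR access-control bits.
 */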
1932static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1933 struct bnxt_qplib_swqe *wqe)
1934{
1935 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1936 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1937 int access = wr->access;
1938
1939 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1940 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1941 wqe->frmr.page_list = mr->pages;
1942 wqe->frmr.page_list_len = mr->npages;
1943 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1944 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1945
1946 if (wr->wr.send_flags & IB_SEND_FENCE)
1947 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1948 if (wr->wr.send_flags & IB_SEND_SIGNALED)
1949 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1950
1951 if (access & IB_ACCESS_LOCAL_WRITE)
1952 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1953 if (access & IB_ACCESS_REMOTE_READ)
1954 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
1955 if (access & IB_ACCESS_REMOTE_WRITE)
1956 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
1957 if (access & IB_ACCESS_REMOTE_ATOMIC)
1958 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
1959 if (access & IB_ACCESS_MW_BIND)
1960 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
1961
1962 wqe->frmr.l_key = wr->key;
1963 wqe->frmr.length = wr->mr->length;
1964 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
1965 wqe->frmr.va = wr->mr->iova;
1966 return 0;
1967}
1968
1969static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
1970 struct ib_send_wr *wr,
1971 struct bnxt_qplib_swqe *wqe)
1972{
1973 /* Copy the inline data to the data field */
1974 u8 *in_data;
1975 u32 i, sge_len;
1976 void *sge_addr;
1977
1978 in_data = wqe->inline_data;
1979 for (i = 0; i < wr->num_sge; i++) {
1980 sge_addr = (void *)(unsigned long)
1981 wr->sg_list[i].addr;
1982 sge_len = wr->sg_list[i].length;
1983
1984 if ((sge_len + wqe->inline_len) >
1985 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1986 dev_err(rdev_to_dev(rdev),
1987 "Inline data size requested > supported value");
1988 return -EINVAL;
1989 }
1990 sge_len = wr->sg_list[i].length;
1991
1992 memcpy(in_data, sge_addr, sge_len);
1993 in_data += wr->sg_list[i].length;
1994 wqe->inline_len += wr->sg_list[i].length;
1995 }
1996 return wqe->inline_len;
1997}
1998
1999static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2000 struct ib_send_wr *wr,
2001 struct bnxt_qplib_swqe *wqe)
2002{
2003 int payload_sz = 0;
2004
2005 if (wr->send_flags & IB_SEND_INLINE)
2006 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2007 else
2008 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2009 wqe->num_sge);
2010
2011 return payload_sz;
2012}
2013
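/* Workaround for a HW stall seen on UD/GSI/raw-Ethertype QPs: once
 * wqe_cnt reaches BNXT_RE_UD_QP_HW_STALL, re-post a MODIFY_QP to RTS
 * and reset the per-QP WQE counter.
 */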
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002014static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2015{
2016 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2017 qp->ib_qp.qp_type == IB_QPT_GSI ||
2018 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2019 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2020 int qp_attr_mask;
2021 struct ib_qp_attr qp_attr;
2022
2023 qp_attr_mask = IB_QP_STATE;
2024 qp_attr.qp_state = IB_QPS_RTS;
2025 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2026 qp->qplib_qp.wqe_cnt = 0;
2027 }
2028}
2029
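/* Post send WRs on the GSI shadow QP used to relay QP1 traffic; every
 * WR is forced to a plain SEND (see bnxt_re_process_raw_qp_pkt_rx()).
 */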
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002030static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2031 struct bnxt_re_qp *qp,
2032 struct ib_send_wr *wr)
2033{
2034 struct bnxt_qplib_swqe wqe;
2035 int rc = 0, payload_sz = 0;
2036 unsigned long flags;
2037
2038 spin_lock_irqsave(&qp->sq_lock, flags);
2039 memset(&wqe, 0, sizeof(wqe));
2040 while (wr) {
2041 /* House keeping */
2042 memset(&wqe, 0, sizeof(wqe));
2043
2044 /* Common */
2045 wqe.num_sge = wr->num_sge;
2046 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2047 dev_err(rdev_to_dev(rdev),
2048 "Limit exceeded for Send SGEs");
2049 rc = -EINVAL;
2050 goto bad;
2051 }
2052
2053 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2054 if (payload_sz < 0) {
2055 rc = -EINVAL;
2056 goto bad;
2057 }
2058 wqe.wr_id = wr->wr_id;
2059
2060 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2061
2062 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2063 if (!rc)
2064 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2065bad:
2066 if (rc) {
2067 dev_err(rdev_to_dev(rdev),
2068 "Post send failed opcode = %#x rc = %d",
2069 wr->opcode, rc);
2070 break;
2071 }
2072 wr = wr->next;
2073 }
2074 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002075 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002076 spin_unlock_irqrestore(&qp->sq_lock, flags);
2077 return rc;
2078}
2079
2080int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2081 struct ib_send_wr **bad_wr)
2082{
2083 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2084 struct bnxt_qplib_swqe wqe;
2085 int rc = 0, payload_sz = 0;
2086 unsigned long flags;
2087
2088 spin_lock_irqsave(&qp->sq_lock, flags);
2089 while (wr) {
2090 /* House keeping */
2091 memset(&wqe, 0, sizeof(wqe));
2092
2093 /* Common */
2094 wqe.num_sge = wr->num_sge;
2095 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2096 dev_err(rdev_to_dev(qp->rdev),
2097 "Limit exceeded for Send SGEs");
2098 rc = -EINVAL;
2099 goto bad;
2100 }
2101
2102 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2103 if (payload_sz < 0) {
2104 rc = -EINVAL;
2105 goto bad;
2106 }
2107 wqe.wr_id = wr->wr_id;
2108
2109 switch (wr->opcode) {
2110 case IB_WR_SEND:
2111 case IB_WR_SEND_WITH_IMM:
2112 if (ib_qp->qp_type == IB_QPT_GSI) {
2113 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2114 payload_sz);
2115 if (rc)
2116 goto bad;
2117 wqe.rawqp1.lflags |=
2118 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2119 }
2120 switch (wr->send_flags) {
2121 case IB_SEND_IP_CSUM:
2122 wqe.rawqp1.lflags |=
2123 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2124 break;
2125 default:
2126 break;
2127 }
2128 /* Fall thru to build the wqe */
2129 case IB_WR_SEND_WITH_INV:
2130 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2131 break;
2132 case IB_WR_RDMA_WRITE:
2133 case IB_WR_RDMA_WRITE_WITH_IMM:
2134 case IB_WR_RDMA_READ:
2135 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2136 break;
2137 case IB_WR_ATOMIC_CMP_AND_SWP:
2138 case IB_WR_ATOMIC_FETCH_AND_ADD:
2139 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2140 break;
2141 case IB_WR_RDMA_READ_WITH_INV:
2142 dev_err(rdev_to_dev(qp->rdev),
2143 "RDMA Read with Invalidate is not supported");
2144 rc = -EINVAL;
2145 goto bad;
2146 case IB_WR_LOCAL_INV:
2147 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2148 break;
2149 case IB_WR_REG_MR:
2150 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2151 break;
2152 default:
2153 /* Unsupported WRs */
2154 dev_err(rdev_to_dev(qp->rdev),
2155 "WR (%#x) is not supported", wr->opcode);
2156 rc = -EINVAL;
2157 goto bad;
2158 }
2159 if (!rc)
2160 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2161bad:
2162 if (rc) {
2163 dev_err(rdev_to_dev(qp->rdev),
2164 "post_send failed op:%#x qps = %#x rc = %d\n",
2165 wr->opcode, qp->qplib_qp.state, rc);
2166 *bad_wr = wr;
2167 break;
2168 }
2169 wr = wr->next;
2170 }
2171 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002172 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002173 spin_unlock_irqrestore(&qp->sq_lock, flags);
2174
2175 return rc;
2176}
2177
2178static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2179 struct bnxt_re_qp *qp,
2180 struct ib_recv_wr *wr)
2181{
2182 struct bnxt_qplib_swqe wqe;
2183 int rc = 0, payload_sz = 0;
2184
2185 memset(&wqe, 0, sizeof(wqe));
2186 while (wr) {
2187 /* House keeping */
2188 memset(&wqe, 0, sizeof(wqe));
2189
2190 /* Common */
2191 wqe.num_sge = wr->num_sge;
2192 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2193 dev_err(rdev_to_dev(rdev),
2194 "Limit exceeded for Receive SGEs");
2195 rc = -EINVAL;
2196 break;
2197 }
2198 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2199 wr->num_sge);
2200 wqe.wr_id = wr->wr_id;
2201 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2202
2203 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2204 if (rc)
2205 break;
2206
2207 wr = wr->next;
2208 }
2209 if (!rc)
2210 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2211 return rc;
2212}
2213
2214int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2215 struct ib_recv_wr **bad_wr)
2216{
2217 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2218 struct bnxt_qplib_swqe wqe;
2219 int rc = 0, payload_sz = 0;
Devesh Sharma018cf592017-05-22 03:15:40 -07002220 unsigned long flags;
2221 u32 count = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002222
Devesh Sharma018cf592017-05-22 03:15:40 -07002223 spin_lock_irqsave(&qp->rq_lock, flags);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002224 while (wr) {
2225 /* House keeping */
2226 memset(&wqe, 0, sizeof(wqe));
2227
2228 /* Common */
2229 wqe.num_sge = wr->num_sge;
2230 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2231 dev_err(rdev_to_dev(qp->rdev),
2232 "Limit exceeded for Receive SGEs");
2233 rc = -EINVAL;
2234 *bad_wr = wr;
2235 break;
2236 }
2237
2238 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2239 wr->num_sge);
2240 wqe.wr_id = wr->wr_id;
2241 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2242
2243 if (ib_qp->qp_type == IB_QPT_GSI)
2244 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2245 payload_sz);
2246 if (!rc)
2247 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2248 if (rc) {
2249 *bad_wr = wr;
2250 break;
2251 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002252
2253		/* Ring the doorbell once the posted RQEs reach the threshold */
2254 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2255 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2256 count = 0;
2257 }
2258
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002259 wr = wr->next;
2260 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002261
2262 if (count)
2263 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2264
2265 spin_unlock_irqrestore(&qp->rq_lock, flags);
2266
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002267 return rc;
2268}
2269
2270/* Completion Queues */
2271int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2272{
2273 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2274 struct bnxt_re_dev *rdev = cq->rdev;
2275 int rc;
2276
2277 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2278 if (rc) {
2279 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2280 return rc;
2281 }
Doug Ledford374cb862017-04-25 14:00:59 -04002282 if (!IS_ERR_OR_NULL(cq->umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002283 ib_umem_release(cq->umem);
2284
2285 if (cq) {
2286 kfree(cq->cql);
2287 kfree(cq);
2288 }
2289 atomic_dec(&rdev->cq_count);
2290 rdev->nq.budget--;
2291 return 0;
2292}
2293
2294struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2295 const struct ib_cq_init_attr *attr,
2296 struct ib_ucontext *context,
2297 struct ib_udata *udata)
2298{
2299 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2300 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2301 struct bnxt_re_cq *cq = NULL;
2302 int rc, entries;
2303 int cqe = attr->cqe;
2304
2305 /* Validate CQ fields */
2306 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2307		dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2308 return ERR_PTR(-EINVAL);
2309 }
2310 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2311 if (!cq)
2312 return ERR_PTR(-ENOMEM);
2313
2314 cq->rdev = rdev;
2315 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2316
2317 entries = roundup_pow_of_two(cqe + 1);
2318 if (entries > dev_attr->max_cq_wqes + 1)
2319 entries = dev_attr->max_cq_wqes + 1;
2320
2321 if (context) {
2322 struct bnxt_re_cq_req req;
2323 struct bnxt_re_ucontext *uctx = container_of
2324 (context,
2325 struct bnxt_re_ucontext,
2326 ib_uctx);
2327 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2328 rc = -EFAULT;
2329 goto fail;
2330 }
2331
2332 cq->umem = ib_umem_get(context, req.cq_va,
2333 entries * sizeof(struct cq_base),
2334 IB_ACCESS_LOCAL_WRITE, 1);
2335 if (IS_ERR(cq->umem)) {
2336 rc = PTR_ERR(cq->umem);
2337 goto fail;
2338 }
2339 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2340 cq->qplib_cq.nmap = cq->umem->nmap;
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07002341 cq->qplib_cq.dpi = &uctx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002342 } else {
2343 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2344 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2345 GFP_KERNEL);
2346 if (!cq->cql) {
2347 rc = -ENOMEM;
2348 goto fail;
2349 }
2350
2351 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2352 cq->qplib_cq.sghead = NULL;
2353 cq->qplib_cq.nmap = 0;
2354 }
2355 cq->qplib_cq.max_wqe = entries;
2356 cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;
2357
2358 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2359 if (rc) {
2360 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2361 goto fail;
2362 }
2363
2364 cq->ib_cq.cqe = entries;
2365 cq->cq_period = cq->qplib_cq.period;
2366 rdev->nq.budget++;
2367
2368 atomic_inc(&rdev->cq_count);
2369
2370 if (context) {
2371 struct bnxt_re_cq_resp resp;
2372
2373 resp.cqid = cq->qplib_cq.id;
2374 resp.tail = cq->qplib_cq.hwq.cons;
2375 resp.phase = cq->qplib_cq.period;
2376 resp.rsvd = 0;
2377 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2378 if (rc) {
2379 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2380 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2381 goto c2fail;
2382 }
2383 }
2384
2385 return &cq->ib_cq;
2386
2387c2fail:
2388 if (context)
2389 ib_umem_release(cq->umem);
2390fail:
2391 kfree(cq->cql);
2392 kfree(cq);
2393 return ERR_PTR(rc);
2394}
2395
2396static u8 __req_to_ib_wc_status(u8 qstatus)
2397{
2398 switch (qstatus) {
2399 case CQ_REQ_STATUS_OK:
2400 return IB_WC_SUCCESS;
2401 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2402 return IB_WC_BAD_RESP_ERR;
2403 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2404 return IB_WC_LOC_LEN_ERR;
2405 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2406 return IB_WC_LOC_QP_OP_ERR;
2407 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2408 return IB_WC_LOC_PROT_ERR;
2409 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2410 return IB_WC_GENERAL_ERR;
2411 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2412 return IB_WC_REM_INV_REQ_ERR;
2413 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2414 return IB_WC_REM_ACCESS_ERR;
2415 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2416 return IB_WC_REM_OP_ERR;
2417 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2418 return IB_WC_RNR_RETRY_EXC_ERR;
2419 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2420 return IB_WC_RETRY_EXC_ERR;
2421 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2422 return IB_WC_WR_FLUSH_ERR;
2423 default:
2424 return IB_WC_GENERAL_ERR;
2425 }
2426 return 0;
2427}
2428
2429static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2430{
2431 switch (qstatus) {
2432 case CQ_RES_RAWETH_QP1_STATUS_OK:
2433 return IB_WC_SUCCESS;
2434 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2435 return IB_WC_LOC_ACCESS_ERR;
2436 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2437 return IB_WC_LOC_LEN_ERR;
2438 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2439 return IB_WC_LOC_PROT_ERR;
2440 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2441 return IB_WC_LOC_QP_OP_ERR;
2442 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2443 return IB_WC_GENERAL_ERR;
2444 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2445 return IB_WC_WR_FLUSH_ERR;
2446 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2447 return IB_WC_WR_FLUSH_ERR;
2448 default:
2449 return IB_WC_GENERAL_ERR;
2450 }
2451}
2452
2453static u8 __rc_to_ib_wc_status(u8 qstatus)
2454{
2455 switch (qstatus) {
2456 case CQ_RES_RC_STATUS_OK:
2457 return IB_WC_SUCCESS;
2458 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2459 return IB_WC_LOC_ACCESS_ERR;
2460 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2461 return IB_WC_LOC_LEN_ERR;
2462 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2463 return IB_WC_LOC_PROT_ERR;
2464 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2465 return IB_WC_LOC_QP_OP_ERR;
2466 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2467 return IB_WC_GENERAL_ERR;
2468 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2469 return IB_WC_REM_INV_REQ_ERR;
2470 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2471 return IB_WC_WR_FLUSH_ERR;
2472 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2473 return IB_WC_WR_FLUSH_ERR;
2474 default:
2475 return IB_WC_GENERAL_ERR;
2476 }
2477}
2478
2479static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2480{
2481 switch (cqe->type) {
2482 case BNXT_QPLIB_SWQE_TYPE_SEND:
2483 wc->opcode = IB_WC_SEND;
2484 break;
2485 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2486 wc->opcode = IB_WC_SEND;
2487 wc->wc_flags |= IB_WC_WITH_IMM;
2488 break;
2489 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2490 wc->opcode = IB_WC_SEND;
2491 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2492 break;
2493 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2494 wc->opcode = IB_WC_RDMA_WRITE;
2495 break;
2496 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2497 wc->opcode = IB_WC_RDMA_WRITE;
2498 wc->wc_flags |= IB_WC_WITH_IMM;
2499 break;
2500 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2501 wc->opcode = IB_WC_RDMA_READ;
2502 break;
2503 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2504 wc->opcode = IB_WC_COMP_SWAP;
2505 break;
2506 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2507 wc->opcode = IB_WC_FETCH_ADD;
2508 break;
2509 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2510 wc->opcode = IB_WC_LOCAL_INV;
2511 break;
2512 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2513 wc->opcode = IB_WC_REG_MR;
2514 break;
2515 default:
2516 wc->opcode = IB_WC_SEND;
2517 break;
2518 }
2519
2520 wc->status = __req_to_ib_wc_status(cqe->status);
2521}
2522
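/* Decode the RoCE variant (v1, v2/IPv4 or v2/IPv6) of a raw QP1
 * completion from the raweth_qp1 flag words; returns -1 if the
 * completion is not a RoCE packet.
 */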
2523static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2524 u16 raweth_qp1_flags2)
2525{
2526 bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
2527
2528	/* raweth_qp1_flags bits 9-6 indicate the itype */
2529 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2530 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2531 return -1;
2532
2533 if (raweth_qp1_flags2 &
2534 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2535 raweth_qp1_flags2 &
2536 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2537 is_udp = true;
2538		/* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
2539 (raweth_qp1_flags2 &
2540 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2541 (is_ipv6 = true) : (is_ipv4 = true);
2542 return ((is_ipv6) ?
2543 BNXT_RE_ROCEV2_IPV6_PACKET :
2544 BNXT_RE_ROCEV2_IPV4_PACKET);
2545 } else {
2546 return BNXT_RE_ROCE_V1_PACKET;
2547 }
2548}
2549
2550static int bnxt_re_to_ib_nw_type(int nw_type)
2551{
2552 u8 nw_hdr_type = 0xFF;
2553
2554 switch (nw_type) {
2555 case BNXT_RE_ROCE_V1_PACKET:
2556 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2557 break;
2558 case BNXT_RE_ROCEV2_IPV4_PACKET:
2559 nw_hdr_type = RDMA_NETWORK_IPV4;
2560 break;
2561 case BNXT_RE_ROCEV2_IPV6_PACKET:
2562 nw_hdr_type = RDMA_NETWORK_IPV6;
2563 break;
2564 }
2565 return nw_hdr_type;
2566}
2567
2568static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2569 void *rq_hdr_buf)
2570{
2571 u8 *tmp_buf = NULL;
2572 struct ethhdr *eth_hdr;
2573 u16 eth_type;
2574 bool rc = false;
2575
2576 tmp_buf = (u8 *)rq_hdr_buf;
2577 /*
2578	 * If the dest MAC is not the same as the I/F MAC, this could be
2579	 * a loopback or multicast address; check whether it is a
2580	 * loopback packet.
2581 */
2582 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2583 tmp_buf += 4;
2584 /* Check the ether type */
2585 eth_hdr = (struct ethhdr *)tmp_buf;
2586 eth_type = ntohs(eth_hdr->h_proto);
2587 switch (eth_type) {
2588 case ETH_P_IBOE:
2589 rc = true;
2590 break;
2591 case ETH_P_IP:
2592 case ETH_P_IPV6: {
2593 u32 len;
2594 struct udphdr *udp_hdr;
2595
2596 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2597 sizeof(struct ipv6hdr));
2598 tmp_buf += sizeof(struct ethhdr) + len;
2599 udp_hdr = (struct udphdr *)tmp_buf;
2600 if (ntohs(udp_hdr->dest) ==
2601 ROCE_V2_UDP_DPORT)
2602 rc = true;
2603 break;
2604 }
2605 default:
2606 break;
2607 }
2608 }
2609
2610 return rc;
2611}
2612
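/* Relay a packet received on the GSI (QP1) raw queue to the shadow QP:
 * build send SGEs that point into the QP1 RQ header buffer (skipping
 * the Ethernet and loopback headers), post matching receive SGEs on
 * the shadow QP, and send the payload so the ULP later sees a normal
 * GRH + MAD completion via the shadow QP.
 */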
2613static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2614 struct bnxt_qplib_cqe *cqe)
2615{
2616 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2617 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2618 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2619 struct ib_send_wr *swr;
2620 struct ib_ud_wr udwr;
2621 struct ib_recv_wr rwr;
2622 int pkt_type = 0;
2623 u32 tbl_idx;
2624 void *rq_hdr_buf;
2625 dma_addr_t rq_hdr_buf_map;
2626 dma_addr_t shrq_hdr_buf_map;
2627 u32 offset = 0;
2628 u32 skip_bytes = 0;
2629 struct ib_sge s_sge[2];
2630 struct ib_sge r_sge[2];
2631 int rc;
2632
2633 memset(&udwr, 0, sizeof(udwr));
2634 memset(&rwr, 0, sizeof(rwr));
2635 memset(&s_sge, 0, sizeof(s_sge));
2636 memset(&r_sge, 0, sizeof(r_sge));
2637
2638 swr = &udwr.wr;
2639 tbl_idx = cqe->wr_id;
2640
2641 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2642 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2643 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2644 tbl_idx);
2645
2646 /* Shadow QP header buffer */
2647 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2648 tbl_idx);
2649 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2650
2651 /* Store this cqe */
2652 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2653 sqp_entry->qp1_qp = qp1_qp;
2654
2655 /* Find packet type from the cqe */
2656
2657 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2658 cqe->raweth_qp1_flags2);
2659 if (pkt_type < 0) {
2660 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2661 return -EINVAL;
2662 }
2663
2664 /* Adjust the offset for the user buffer and post in the rq */
2665
2666 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2667 offset = 20;
2668
2669 /*
2670 * QP1 loopback packet has 4 bytes of internal header before
2671 * ether header. Skip these four bytes.
2672 */
2673 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2674 skip_bytes = 4;
2675
2676	/* First send SGE. Skip the ether header */
2677 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2678 + skip_bytes;
2679 s_sge[0].lkey = 0xFFFFFFFF;
2680 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2681 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2682
2683 /* Second Send SGE */
2684 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2685 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2686 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2687 s_sge[1].addr += 8;
2688 s_sge[1].lkey = 0xFFFFFFFF;
2689 s_sge[1].length = 256;
2690
2691 /* First recv SGE */
2692
2693 r_sge[0].addr = shrq_hdr_buf_map;
2694 r_sge[0].lkey = 0xFFFFFFFF;
2695 r_sge[0].length = 40;
2696
2697 r_sge[1].addr = sqp_entry->sge.addr + offset;
2698 r_sge[1].lkey = sqp_entry->sge.lkey;
2699 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2700
2701 /* Create receive work request */
2702 rwr.num_sge = 2;
2703 rwr.sg_list = r_sge;
2704 rwr.wr_id = tbl_idx;
2705 rwr.next = NULL;
2706
2707 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2708 if (rc) {
2709 dev_err(rdev_to_dev(rdev),
2710 "Failed to post Rx buffers to shadow QP");
2711 return -ENOMEM;
2712 }
2713
2714 swr->num_sge = 2;
2715 swr->sg_list = s_sge;
2716 swr->wr_id = tbl_idx;
2717 swr->opcode = IB_WR_SEND;
2718 swr->next = NULL;
2719
2720 udwr.ah = &rdev->sqp_ah->ib_ah;
2721 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2722 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2723
2724 /* post data received in the send queue */
2725 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2726
2727 return 0;
2728}
2729
2730static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2731 struct bnxt_qplib_cqe *cqe)
2732{
2733 wc->opcode = IB_WC_RECV;
2734 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2735 wc->wc_flags |= IB_WC_GRH;
2736}
2737
2738static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2739 struct bnxt_qplib_cqe *cqe)
2740{
2741 wc->opcode = IB_WC_RECV;
2742 wc->status = __rc_to_ib_wc_status(cqe->status);
2743
2744 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2745 wc->wc_flags |= IB_WC_WITH_IMM;
2746 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2747 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2748 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2749 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2750 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2751}
2752
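/* Completion on the shadow (GSI relay) QP: report it against the
 * original QP1 using the CQE and wr_id stashed in the sqp_tbl entry.
 */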
2753static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2754 struct ib_wc *wc,
2755 struct bnxt_qplib_cqe *cqe)
2756{
2757 u32 tbl_idx;
2758 struct bnxt_re_dev *rdev = qp->rdev;
2759 struct bnxt_re_qp *qp1_qp = NULL;
2760 struct bnxt_qplib_cqe *orig_cqe = NULL;
2761 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2762 int nw_type;
2763
2764 tbl_idx = cqe->wr_id;
2765
2766 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2767 qp1_qp = sqp_entry->qp1_qp;
2768 orig_cqe = &sqp_entry->cqe;
2769
2770 wc->wr_id = sqp_entry->wrid;
2771 wc->byte_len = orig_cqe->length;
2772 wc->qp = &qp1_qp->ib_qp;
2773
2774 wc->ex.imm_data = orig_cqe->immdata;
2775 wc->src_qp = orig_cqe->src_qp;
2776 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2777 wc->port_num = 1;
2778 wc->vendor_err = orig_cqe->status;
2779
2780 wc->opcode = IB_WC_RECV;
2781 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2782 wc->wc_flags |= IB_WC_GRH;
2783
2784 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2785 orig_cqe->raweth_qp1_flags2);
2786 if (nw_type >= 0) {
2787 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2788 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2789 }
2790}
2791
2792static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2793 struct bnxt_qplib_cqe *cqe)
2794{
2795 wc->opcode = IB_WC_RECV;
2796 wc->status = __rc_to_ib_wc_status(cqe->status);
2797
2798 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2799 wc->wc_flags |= IB_WC_WITH_IMM;
2800 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2801 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2802 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2803 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2804 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2805}
2806
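/* Post a fence memory-window bind as a "phantom" WQE on the SQ, under
 * the SQ lock; bnxt_re_poll_cq() requests this when the qplib poll
 * code has flagged sq->send_phantom.
 */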
Eddie Wai9152e0b2017-06-14 03:26:23 -07002807static int send_phantom_wqe(struct bnxt_re_qp *qp)
2808{
2809 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2810 unsigned long flags;
2811 int rc = 0;
2812
2813 spin_lock_irqsave(&qp->sq_lock, flags);
2814
2815 rc = bnxt_re_bind_fence_mw(lib_qp);
2816 if (!rc) {
2817 lib_qp->sq.phantom_wqe_cnt++;
2818 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2819 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2820 lib_qp->id, lib_qp->sq.hwq.prod,
2821 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2822 lib_qp->sq.phantom_wqe_cnt);
2823 }
2824
2825 spin_unlock_irqrestore(&qp->sq_lock, flags);
2826 return rc;
2827}
2828
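/* Poll up to num_entries CQEs, transcribing each qplib CQE into an
 * ib_wc; GSI (QP1) send/recv completions are intercepted and handled
 * through the shadow QP machinery above.
 */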
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002829int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2830{
2831 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2832 struct bnxt_re_qp *qp;
2833 struct bnxt_qplib_cqe *cqe;
2834 int i, ncqe, budget;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002835 struct bnxt_qplib_q *sq;
2836 struct bnxt_qplib_qp *lib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002837 u32 tbl_idx;
2838 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2839 unsigned long flags;
2840
2841 spin_lock_irqsave(&cq->cq_lock, flags);
2842 budget = min_t(u32, num_entries, cq->max_cql);
Devesh Sharma10d1ded2017-06-29 12:28:17 -07002843 num_entries = budget;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002844 if (!cq->cql) {
2845 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2846 goto exit;
2847 }
2848 cqe = &cq->cql[0];
2849 while (budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002850 lib_qp = NULL;
2851 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2852 if (lib_qp) {
2853 sq = &lib_qp->sq;
2854 if (sq->send_phantom) {
2855 qp = container_of(lib_qp,
2856 struct bnxt_re_qp, qplib_qp);
2857 if (send_phantom_wqe(qp) == -ENOMEM)
2858 dev_err(rdev_to_dev(cq->rdev),
2859 "Phantom failed! Scheduled to send again\n");
2860 else
2861 sq->send_phantom = false;
2862 }
2863 }
2864
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002865 if (!ncqe)
2866 break;
2867
2868 for (i = 0; i < ncqe; i++, cqe++) {
2869 /* Transcribe each qplib_wqe back to ib_wc */
2870 memset(wc, 0, sizeof(*wc));
2871
2872 wc->wr_id = cqe->wr_id;
2873 wc->byte_len = cqe->length;
2874 qp = container_of
2875 ((struct bnxt_qplib_qp *)
2876 (unsigned long)(cqe->qp_handle),
2877 struct bnxt_re_qp, qplib_qp);
2878 if (!qp) {
2879 dev_err(rdev_to_dev(cq->rdev),
2880 "POLL CQ : bad QP handle");
2881 continue;
2882 }
2883 wc->qp = &qp->ib_qp;
2884 wc->ex.imm_data = cqe->immdata;
2885 wc->src_qp = cqe->src_qp;
2886 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2887 wc->port_num = 1;
2888 wc->vendor_err = cqe->status;
2889
2890 switch (cqe->opcode) {
2891 case CQ_BASE_CQE_TYPE_REQ:
2892 if (qp->qplib_qp.id ==
2893 qp->rdev->qp1_sqp->qplib_qp.id) {
2894 /* Handle this completion with
2895 * the stored completion
2896 */
2897 memset(wc, 0, sizeof(*wc));
2898 continue;
2899 }
2900 bnxt_re_process_req_wc(wc, cqe);
2901 break;
2902 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2903 if (!cqe->status) {
2904 int rc = 0;
2905
2906 rc = bnxt_re_process_raw_qp_pkt_rx
2907 (qp, cqe);
2908 if (!rc) {
2909 memset(wc, 0, sizeof(*wc));
2910 continue;
2911 }
2912 cqe->status = -1;
2913 }
2914 /* Errors need not be looped back.
2915 * But change the wr_id to the one
2916 * stored in the table
2917 */
2918 tbl_idx = cqe->wr_id;
2919 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2920 wc->wr_id = sqp_entry->wrid;
2921 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2922 break;
2923 case CQ_BASE_CQE_TYPE_RES_RC:
2924 bnxt_re_process_res_rc_wc(wc, cqe);
2925 break;
2926 case CQ_BASE_CQE_TYPE_RES_UD:
2927 if (qp->qplib_qp.id ==
2928 qp->rdev->qp1_sqp->qplib_qp.id) {
2929 /* Handle this completion with
2930 * the stored completion
2931 */
2932 if (cqe->status) {
2933 continue;
2934 } else {
2935 bnxt_re_process_res_shadow_qp_wc
2936 (qp, wc, cqe);
2937 break;
2938 }
2939 }
2940 bnxt_re_process_res_ud_wc(wc, cqe);
2941 break;
2942 default:
2943 dev_err(rdev_to_dev(cq->rdev),
2944 "POLL CQ : type 0x%x not handled",
2945 cqe->opcode);
2946 continue;
2947 }
2948 wc++;
2949 budget--;
2950 }
2951 }
2952exit:
2953 spin_unlock_irqrestore(&cq->cq_lock, flags);
2954 return num_entries - budget;
2955}
2956
2957int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
2958 enum ib_cq_notify_flags ib_cqn_flags)
2959{
2960 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2961 int type = 0;
2962
2963 /* Trigger on the very next completion */
2964 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
2965 type = DBR_DBR_TYPE_CQ_ARMALL;
2966 /* Trigger on the next solicited completion */
2967 else if (ib_cqn_flags & IB_CQ_SOLICITED)
2968 type = DBR_DBR_TYPE_CQ_ARMSE;
2969
Selvin Xavier499e4562017-06-29 12:28:18 -07002970 /* Poll to see if there are missed events */
2971 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
2972 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
2973 return 1;
2974
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002975 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
2976
2977 return 0;
2978}
2979
2980/* Memory Regions */
2981struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
2982{
2983 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
2984 struct bnxt_re_dev *rdev = pd->rdev;
2985 struct bnxt_re_mr *mr;
2986 u64 pbl = 0;
2987 int rc;
2988
2989 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2990 if (!mr)
2991 return ERR_PTR(-ENOMEM);
2992
2993 mr->rdev = rdev;
2994 mr->qplib_mr.pd = &pd->qplib_pd;
2995 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
2996 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
2997
2998 /* Allocate and register 0 as the address */
2999 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3000 if (rc)
3001 goto fail;
3002
3003 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3004	mr->qplib_mr.total_size = -1; /* Infinite length */
3005 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
3006 if (rc)
3007 goto fail_mr;
3008
3009 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3010 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3011 IB_ACCESS_REMOTE_ATOMIC))
3012 mr->ib_mr.rkey = mr->ib_mr.lkey;
3013 atomic_inc(&rdev->mr_count);
3014
3015 return &mr->ib_mr;
3016
3017fail_mr:
3018 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3019fail:
3020 kfree(mr);
3021 return ERR_PTR(rc);
3022}
3023
3024int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3025{
3026 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3027 struct bnxt_re_dev *rdev = mr->rdev;
Colin Ian Kingebbd1df2017-02-17 15:35:22 +00003028 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003029
Selvin Xavier1c980b02017-05-22 03:15:34 -07003030 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3031 if (rc) {
3032 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3033 return rc;
3034 }
3035
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003036 if (mr->npages && mr->pages) {
3037 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3038 &mr->qplib_frpl);
3039 kfree(mr->pages);
3040 mr->npages = 0;
3041 mr->pages = NULL;
3042 }
Doug Ledford374cb862017-04-25 14:00:59 -04003043 if (!IS_ERR_OR_NULL(mr->ib_umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003044 ib_umem_release(mr->ib_umem);
3045
3046 kfree(mr);
3047 atomic_dec(&rdev->mr_count);
3048 return rc;
3049}
3050
3051static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3052{
3053 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3054
3055 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3056 return -ENOMEM;
3057
3058 mr->pages[mr->npages++] = addr;
3059 return 0;
3060}
3061
3062int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3063 unsigned int *sg_offset)
3064{
3065 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3066
3067 mr->npages = 0;
3068 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3069}
3070
3071struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3072 u32 max_num_sg)
3073{
3074 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3075 struct bnxt_re_dev *rdev = pd->rdev;
3076 struct bnxt_re_mr *mr = NULL;
3077 int rc;
3078
3079 if (type != IB_MR_TYPE_MEM_REG) {
3080 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3081 return ERR_PTR(-EINVAL);
3082 }
3083 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3084 return ERR_PTR(-EINVAL);
3085
3086 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3087 if (!mr)
3088 return ERR_PTR(-ENOMEM);
3089
3090 mr->rdev = rdev;
3091 mr->qplib_mr.pd = &pd->qplib_pd;
3092 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3093 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3094
3095 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3096 if (rc)
3097 goto fail;
3098
3099 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3100 mr->ib_mr.rkey = mr->ib_mr.lkey;
3101
3102 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3103 if (!mr->pages) {
3104 rc = -ENOMEM;
3105 goto fail;
3106 }
3107 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3108 &mr->qplib_frpl, max_num_sg);
3109 if (rc) {
3110 dev_err(rdev_to_dev(rdev),
3111 "Failed to allocate HW FR page list");
3112 goto fail_mr;
3113 }
3114
3115 atomic_inc(&rdev->mr_count);
3116 return &mr->ib_mr;
3117
3118fail_mr:
3119 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3120fail:
3121 kfree(mr->pages);
3122 kfree(mr);
3123 return ERR_PTR(rc);
3124}
3125
Eddie Wai9152e0b2017-06-14 03:26:23 -07003126struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3127 struct ib_udata *udata)
3128{
3129 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3130 struct bnxt_re_dev *rdev = pd->rdev;
3131 struct bnxt_re_mw *mw;
3132 int rc;
3133
3134 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3135 if (!mw)
3136 return ERR_PTR(-ENOMEM);
3137 mw->rdev = rdev;
3138 mw->qplib_mw.pd = &pd->qplib_pd;
3139
3140 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3141 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3142 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3143 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3144 if (rc) {
3145 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3146 goto fail;
3147 }
3148 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3149
3150 atomic_inc(&rdev->mw_count);
3151 return &mw->ib_mw;
3152
3153fail:
3154 kfree(mw);
3155 return ERR_PTR(rc);
3156}
3157
3158int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3159{
3160 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3161 struct bnxt_re_dev *rdev = mw->rdev;
3162 int rc;
3163
3164 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3165 if (rc) {
3166 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3167 return rc;
3168 }
3169
3170 kfree(mw);
3171 atomic_dec(&rdev->mw_count);
3172 return rc;
3173}
3174
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003175/* uverbs */
3176struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3177 u64 virt_addr, int mr_access_flags,
3178 struct ib_udata *udata)
3179{
3180 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3181 struct bnxt_re_dev *rdev = pd->rdev;
3182 struct bnxt_re_mr *mr;
3183 struct ib_umem *umem;
3184 u64 *pbl_tbl, *pbl_tbl_orig;
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003185 int i, umem_pgs, pages, rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003186 struct scatterlist *sg;
3187 int entry;
3188
Selvin Xavier58d4a672017-06-29 12:28:12 -07003189 if (length > BNXT_RE_MAX_MR_SIZE) {
3190 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
3191 length, BNXT_RE_MAX_MR_SIZE);
3192 return ERR_PTR(-ENOMEM);
3193 }
3194
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003195 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3196 if (!mr)
3197 return ERR_PTR(-ENOMEM);
3198
3199 mr->rdev = rdev;
3200 mr->qplib_mr.pd = &pd->qplib_pd;
3201 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3202 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3203
3204 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3205 mr_access_flags, 0);
3206 if (IS_ERR(umem)) {
3207 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3208 rc = -EFAULT;
3209 goto free_mr;
3210 }
3211 mr->ib_umem = umem;
3212
3213 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3214 if (rc) {
3215 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3216 goto release_umem;
3217 }
3218 /* The fixed portion of the rkey is the same as the lkey */
3219 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3220
3221 mr->qplib_mr.va = virt_addr;
3222 umem_pgs = ib_umem_page_count(umem);
3223 if (!umem_pgs) {
3224 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3225 rc = -EINVAL;
3226 goto free_mrw;
3227 }
3228 mr->qplib_mr.total_size = length;
3229
3230 pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
3231 if (!pbl_tbl) {
3232 rc = -EINVAL;
3233 goto free_mrw;
3234 }
3235 pbl_tbl_orig = pbl_tbl;
3236
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003237 if (umem->hugetlb) {
3238 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3239 rc = -EFAULT;
3240 goto fail;
3241 }
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003242
3243 if (umem->page_shift != PAGE_SHIFT) {
3244 dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003245 rc = -EFAULT;
3246 goto fail;
3247 }
3248 /* Map umem buf ptrs to the PBL */
3249 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003250 pages = sg_dma_len(sg) >> umem->page_shift;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003251 for (i = 0; i < pages; i++, pbl_tbl++)
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003252 *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003253 }
3254 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3255 umem_pgs, false);
3256 if (rc) {
3257 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3258 goto fail;
3259 }
3260
3261 kfree(pbl_tbl_orig);
3262
3263 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3264 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3265 atomic_inc(&rdev->mr_count);
3266
3267 return &mr->ib_mr;
3268fail:
3269 kfree(pbl_tbl_orig);
3270free_mrw:
3271 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3272release_umem:
3273 ib_umem_release(umem);
3274free_mr:
3275 kfree(mr);
3276 return ERR_PTR(rc);
3277}
3278
3279struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3280 struct ib_udata *udata)
3281{
3282 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3283 struct bnxt_re_uctx_resp resp;
3284 struct bnxt_re_ucontext *uctx;
3285 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3286 int rc;
3287
3288 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3289 ibdev->uverbs_abi_ver);
3290
3291 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3292 dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
3293 BNXT_RE_ABI_VERSION);
3294 return ERR_PTR(-EPERM);
3295 }
3296
3297 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3298 if (!uctx)
3299 return ERR_PTR(-ENOMEM);
3300
3301 uctx->rdev = rdev;
3302
3303 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3304 if (!uctx->shpg) {
3305 rc = -ENOMEM;
3306 goto fail;
3307 }
3308 spin_lock_init(&uctx->sh_lock);
3309
3310 resp.dev_id = rdev->en_dev->pdev->devfn; /*Temp, Use idr_alloc instead*/
3311 resp.max_qp = rdev->qplib_ctx.qpc_count;
3312 resp.pg_size = PAGE_SIZE;
3313 resp.cqe_sz = sizeof(struct cq_base);
3314 resp.max_cqd = dev_attr->max_cq_wqes;
3315 resp.rsvd = 0;
3316
3317 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3318 if (rc) {
3319 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3320 rc = -EFAULT;
3321 goto cfail;
3322 }
3323
3324 return &uctx->ib_uctx;
3325cfail:
3326 free_page((unsigned long)uctx->shpg);
3327 uctx->shpg = NULL;
3328fail:
3329 kfree(uctx);
3330 return ERR_PTR(rc);
3331}
3332
3333int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3334{
3335 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3336 struct bnxt_re_ucontext,
3337 ib_uctx);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003338
3339 struct bnxt_re_dev *rdev = uctx->rdev;
3340 int rc = 0;
3341
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003342 if (uctx->shpg)
3343 free_page((unsigned long)uctx->shpg);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003344
3345 if (uctx->dpi.dbr) {
3346		/* The DPI was allocated when the application created its first
3347		 * PD; free it here and mark the context DPI as NULL.
3348 */
3349 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3350 &rdev->qplib_res.dpi_tbl,
3351 &uctx->dpi);
3352 if (rc)
3353			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3354 /* Don't fail, continue*/
3355 uctx->dpi.dbr = NULL;
3356 }
3357
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003358 kfree(uctx);
3359 return 0;
3360}
3361
3362/* Helper function to mmap the virtual memory from user app */
3363int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3364{
3365 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3366 struct bnxt_re_ucontext,
3367 ib_uctx);
3368 struct bnxt_re_dev *rdev = uctx->rdev;
3369 u64 pfn;
3370
3371 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3372 return -EINVAL;
3373
3374 if (vma->vm_pgoff) {
3375 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3376 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3377 PAGE_SIZE, vma->vm_page_prot)) {
3378 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3379 return -EAGAIN;
3380 }
3381 } else {
3382 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3383 if (remap_pfn_range(vma, vma->vm_start,
3384 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3385 dev_err(rdev_to_dev(rdev),
3386 "Failed to map shared page");
3387 return -EAGAIN;
3388 }
3389 }
3390
3391 return 0;
3392}