1/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39#include <linux/interrupt.h>
40#include <linux/types.h>
41#include <linux/pci.h>
42#include <linux/netdevice.h>
43#include <linux/if_ether.h>
44
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_user_verbs.h>
47#include <rdma/ib_umem.h>
48#include <rdma/ib_addr.h>
49#include <rdma/ib_mad.h>
50#include <rdma/ib_cache.h>
51
52#include "bnxt_ulp.h"
53
54#include "roce_hsi.h"
55#include "qplib_res.h"
56#include "qplib_sp.h"
57#include "qplib_fp.h"
58#include "qplib_rcfw.h"
59
60#include "bnxt_re.h"
61#include "ib_verbs.h"
62#include <rdma/bnxt_re-abi.h>
63
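/*
 * Helpers to translate access flags between the IB verbs encoding and the
 * BNXT_QPLIB_ACCESS_* encoding consumed by the qplib layer.
 */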
64static int __from_ib_access_flags(int iflags)
65{
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
83};
84
85static enum ib_access_flags __to_ib_access_flags(int qflags)
86{
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
104};
105
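/* Copy an ib_sge array into the qplib SGE format; returns the total length in bytes. */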
106static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 struct bnxt_qplib_sge *sg_list, int num)
108{
109 int i, total = 0;
110
111 for (i = 0; i < num; i++) {
112 sg_list[i].addr = ib_sg_list[i].addr;
113 sg_list[i].lkey = ib_sg_list[i].lkey;
114 sg_list[i].size = ib_sg_list[i].length;
115 total += sg_list[i].size;
116 }
117 return total;
118}
119
120/* Device */
121struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122{
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
125
126 rcu_read_lock();
127 if (rdev)
128 netdev = rdev->netdev;
129 if (netdev)
130 dev_hold(netdev);
131
132 rcu_read_unlock();
133 return netdev;
134}
135
136int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
139{
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142
143 memset(ib_attr, 0, sizeof(*ib_attr));
144
145 ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
146 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
147 (u8 *)&ib_attr->sys_image_guid);
148 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
149 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
150
151 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
152 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
153 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
154 ib_attr->max_qp = dev_attr->max_qp;
155 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
156 ib_attr->device_cap_flags =
157 IB_DEVICE_CURR_QP_STATE_MOD
158 | IB_DEVICE_RC_RNR_NAK_GEN
159 | IB_DEVICE_SHUTDOWN_PORT
160 | IB_DEVICE_SYS_IMAGE_GUID
161 | IB_DEVICE_LOCAL_DMA_LKEY
162 | IB_DEVICE_RESIZE_MAX_WR
163 | IB_DEVICE_PORT_ACTIVE_EVENT
164 | IB_DEVICE_N_NOTIFY_CQ
165 | IB_DEVICE_MEM_WINDOW
166 | IB_DEVICE_MEM_WINDOW_TYPE_2B
167 | IB_DEVICE_MEM_MGT_EXTENSIONS;
168 ib_attr->max_sge = dev_attr->max_qp_sges;
169 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
170 ib_attr->max_cq = dev_attr->max_cq;
171 ib_attr->max_cqe = dev_attr->max_cq_wqes;
172 ib_attr->max_mr = dev_attr->max_mr;
173 ib_attr->max_pd = dev_attr->max_pd;
174 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
175 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
176 if (dev_attr->is_atomic) {
177 ib_attr->atomic_cap = IB_ATOMIC_HCA;
178 ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
179 }
180
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
184 ib_attr->max_ee = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
193
194 ib_attr->max_fmr = 0;
195 ib_attr->max_map_per_fmr = 0;
196
197 ib_attr->max_srq = dev_attr->max_srq;
198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
200
201 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
202
203 ib_attr->max_pkeys = 1;
204 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
205 return 0;
206}
207
208int bnxt_re_modify_device(struct ib_device *ibdev,
209 int device_modify_mask,
210 struct ib_device_modify *device_modify)
211{
212 switch (device_modify_mask) {
213 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
214 /* Modify the GUID requires the modification of the GID table */
215 /* GUID should be made as READ-ONLY */
216 break;
217 case IB_DEVICE_MODIFY_NODE_DESC:
218 /* Node Desc should be made as READ-ONLY */
219 break;
220 default:
221 break;
222 }
223 return 0;
224}
225
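/* Map the netdev's ethtool link speed to an approximate IB speed/width pair. */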
226static void __to_ib_speed_width(struct net_device *netdev, u8 *speed, u8 *width)
227{
228 struct ethtool_link_ksettings lksettings;
229 u32 espeed;
230
231 if (netdev->ethtool_ops && netdev->ethtool_ops->get_link_ksettings) {
232 memset(&lksettings, 0, sizeof(lksettings));
233 rtnl_lock();
234 netdev->ethtool_ops->get_link_ksettings(netdev, &lksettings);
235 rtnl_unlock();
236 espeed = lksettings.base.speed;
237 } else {
238 espeed = SPEED_UNKNOWN;
239 }
240 switch (espeed) {
241 case SPEED_1000:
242 *speed = IB_SPEED_SDR;
243 *width = IB_WIDTH_1X;
244 break;
245 case SPEED_10000:
246 *speed = IB_SPEED_QDR;
247 *width = IB_WIDTH_1X;
248 break;
249 case SPEED_20000:
250 *speed = IB_SPEED_DDR;
251 *width = IB_WIDTH_4X;
252 break;
253 case SPEED_25000:
254 *speed = IB_SPEED_EDR;
255 *width = IB_WIDTH_1X;
256 break;
257 case SPEED_40000:
258 *speed = IB_SPEED_QDR;
259 *width = IB_WIDTH_4X;
260 break;
261 case SPEED_50000:
262 break;
263 default:
264 *speed = IB_SPEED_SDR;
265 *width = IB_WIDTH_1X;
266 break;
267 }
268}
269
270/* Port */
271int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
272 struct ib_port_attr *port_attr)
273{
274 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
275 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
276
277 memset(port_attr, 0, sizeof(*port_attr));
278
279 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
280 port_attr->state = IB_PORT_ACTIVE;
281 port_attr->phys_state = 5;
282 } else {
283 port_attr->state = IB_PORT_DOWN;
284 port_attr->phys_state = 3;
285 }
286 port_attr->max_mtu = IB_MTU_4096;
287 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
288 port_attr->gid_tbl_len = dev_attr->max_sgid;
289 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
290 IB_PORT_DEVICE_MGMT_SUP |
291 IB_PORT_VENDOR_CLASS_SUP |
292 IB_PORT_IP_BASED_GIDS;
293
294 /* Max MSG size set to 2G for now */
295 port_attr->max_msg_sz = 0x80000000;
296 port_attr->bad_pkey_cntr = 0;
297 port_attr->qkey_viol_cntr = 0;
298 port_attr->pkey_tbl_len = dev_attr->max_pkey;
299 port_attr->lid = 0;
300 port_attr->sm_lid = 0;
301 port_attr->lmc = 0;
302 port_attr->max_vl_num = 4;
303 port_attr->sm_sl = 0;
304 port_attr->subnet_timeout = 0;
305 port_attr->init_type_reply = 0;
306 /* call the underlying netdev's ethtool hooks to query speed settings
307 * for which we acquire rtnl_lock _only_ if it's registered with
308 * IB stack to avoid race in the NETDEV_UNREG path
309 */
310 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
311 __to_ib_speed_width(rdev->netdev, &port_attr->active_speed,
312 &port_attr->active_width);
313 return 0;
314}
315
316int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
317 struct ib_port_immutable *immutable)
318{
319 struct ib_port_attr port_attr;
320
321 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
322 return -EINVAL;
323
324 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
325 immutable->gid_tbl_len = port_attr.gid_tbl_len;
326 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
327 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
328 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
329 return 0;
330}
331
332int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
333 u16 index, u16 *pkey)
334{
335 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
336
337 /* Ignore port_num */
338
339 memset(pkey, 0, sizeof(*pkey));
340 return bnxt_qplib_get_pkey(&rdev->qplib_res,
341 &rdev->qplib_res.pkey_tbl, index, pkey);
342}
343
344int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
345 int index, union ib_gid *gid)
346{
347 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
348 int rc = 0;
349
350 /* Ignore port_num */
351 memset(gid, 0, sizeof(*gid));
352 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
353 &rdev->qplib_res.sgid_tbl, index,
354 (struct bnxt_qplib_gid *)gid);
355 return rc;
356}
357
358int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
359 unsigned int index, void **context)
360{
361 int rc = 0;
362 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
363 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
364 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
365
366 /* Delete the entry from the hardware */
367 ctx = *context;
368 if (!ctx)
369 return -EINVAL;
370
371 if (sgid_tbl && sgid_tbl->active) {
372 if (ctx->idx >= sgid_tbl->max)
373 return -EINVAL;
374 ctx->refcnt--;
375 if (!ctx->refcnt) {
376 rc = bnxt_qplib_del_sgid(sgid_tbl,
377 &sgid_tbl->tbl[ctx->idx],
378 true);
379 if (rc) {
380 dev_err(rdev_to_dev(rdev),
381 "Failed to remove GID: %#x", rc);
382 } else {
383 ctx_tbl = sgid_tbl->ctx;
384 ctx_tbl[ctx->idx] = NULL;
385 kfree(ctx);
386 }
387 }
388 } else {
389 return -EINVAL;
390 }
391 return rc;
392}
393
394int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
395 unsigned int index, const union ib_gid *gid,
396 const struct ib_gid_attr *attr, void **context)
397{
398 int rc;
399 u32 tbl_idx = 0;
400 u16 vlan_id = 0xFFFF;
401 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
402 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
403 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
404
405 if ((attr->ndev) && is_vlan_dev(attr->ndev))
406 vlan_id = vlan_dev_vlan_id(attr->ndev);
407
408 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
409 rdev->qplib_res.netdev->dev_addr,
410 vlan_id, true, &tbl_idx);
411 if (rc == -EALREADY) {
412 ctx_tbl = sgid_tbl->ctx;
413 ctx_tbl[tbl_idx]->refcnt++;
414 *context = ctx_tbl[tbl_idx];
415 return 0;
416 }
417
418 if (rc < 0) {
419 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
420 return rc;
421 }
422
423 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
424 if (!ctx)
425 return -ENOMEM;
426 ctx_tbl = sgid_tbl->ctx;
427 ctx->idx = tbl_idx;
428 ctx->refcnt = 1;
429 ctx_tbl[tbl_idx] = ctx;
430
431 return rc;
432}
433
434enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
435 u8 port_num)
436{
437 return IB_LINK_LAYER_ETHERNET;
438}
439
440#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
441
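/*
 * Fence support for kernel PDs: a DMA-mapped buffer is registered as an MR,
 * a type-1 MW is created on it, and a pre-built BIND_MW WQE carrying the
 * UC_FENCE flag is (re)posted with a fresh rkey by bnxt_re_bind_fence_mw().
 */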
442static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
443{
444 struct bnxt_re_fence_data *fence = &pd->fence;
445 struct ib_mr *ib_mr = &fence->mr->ib_mr;
446 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
447
448 memset(wqe, 0, sizeof(*wqe));
449 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
450 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
451 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
452 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
453 wqe->bind.zero_based = false;
454 wqe->bind.parent_l_key = ib_mr->lkey;
455 wqe->bind.va = (u64)(unsigned long)fence->va;
456 wqe->bind.length = fence->size;
457 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
458 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
459
460 /* Save the initial rkey in fence structure for now;
461 * wqe->bind.r_key will be set at (re)bind time.
462 */
463 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
464}
465
466static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
467{
468 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
469 qplib_qp);
470 struct ib_pd *ib_pd = qp->ib_qp.pd;
471 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
472 struct bnxt_re_fence_data *fence = &pd->fence;
473 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
474 struct bnxt_qplib_swqe wqe;
475 int rc;
476
477 memcpy(&wqe, fence_wqe, sizeof(wqe));
478 wqe.bind.r_key = fence->bind_rkey;
479 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
480
481 dev_dbg(rdev_to_dev(qp->rdev),
482 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
483 wqe.bind.r_key, qp->qplib_qp.id, pd);
484 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
485 if (rc) {
486 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
487 return rc;
488 }
489 bnxt_qplib_post_send_db(&qp->qplib_qp);
490
491 return rc;
492}
493
494static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
495{
496 struct bnxt_re_fence_data *fence = &pd->fence;
497 struct bnxt_re_dev *rdev = pd->rdev;
498 struct device *dev = &rdev->en_dev->pdev->dev;
499 struct bnxt_re_mr *mr = fence->mr;
500
501 if (fence->mw) {
502 bnxt_re_dealloc_mw(fence->mw);
503 fence->mw = NULL;
504 }
505 if (mr) {
506 if (mr->ib_mr.rkey)
507 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
508 true);
509 if (mr->ib_mr.lkey)
510 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
511 kfree(mr);
512 fence->mr = NULL;
513 }
514 if (fence->dma_addr) {
515 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
516 DMA_BIDIRECTIONAL);
517 fence->dma_addr = 0;
518 }
519}
520
521static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
522{
523 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
524 struct bnxt_re_fence_data *fence = &pd->fence;
525 struct bnxt_re_dev *rdev = pd->rdev;
526 struct device *dev = &rdev->en_dev->pdev->dev;
527 struct bnxt_re_mr *mr = NULL;
528 dma_addr_t dma_addr = 0;
529 struct ib_mw *mw;
530 u64 pbl_tbl;
531 int rc;
532
533 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
534 DMA_BIDIRECTIONAL);
535 rc = dma_mapping_error(dev, dma_addr);
536 if (rc) {
537 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
538 rc = -EIO;
539 fence->dma_addr = 0;
540 goto fail;
541 }
542 fence->dma_addr = dma_addr;
543
544 /* Allocate a MR */
545 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
546 if (!mr) {
547 rc = -ENOMEM;
548 goto fail;
549 }
550 fence->mr = mr;
551 mr->rdev = rdev;
552 mr->qplib_mr.pd = &pd->qplib_pd;
553 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
554 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
555 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
556 if (rc) {
557 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
558 goto fail;
559 }
560
561 /* Register MR */
562 mr->ib_mr.lkey = mr->qplib_mr.lkey;
563 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
564 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
565 pbl_tbl = dma_addr;
566 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
567 BNXT_RE_FENCE_PBL_SIZE, false);
568 if (rc) {
569 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
570 goto fail;
571 }
572 mr->ib_mr.rkey = mr->qplib_mr.rkey;
573
574 /* Create a fence MW only for kernel consumers */
575 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
576 if (IS_ERR(mw)) {
577 dev_err(rdev_to_dev(rdev),
578 "Failed to create fence-MW for PD: %p\n", pd);
579 rc = PTR_ERR(mw);
580 goto fail;
581 }
582 fence->mw = mw;
583
584 bnxt_re_create_fence_wqe(pd);
585 return 0;
586
587fail:
588 bnxt_re_destroy_fence_mr(pd);
589 return rc;
590}
591
592/* Protection Domains */
593int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
594{
595 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
596 struct bnxt_re_dev *rdev = pd->rdev;
597 int rc;
598
599 bnxt_re_destroy_fence_mr(pd);
600
601 if (pd->qplib_pd.id) {
602 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
603 &rdev->qplib_res.pd_tbl,
604 &pd->qplib_pd);
605 if (rc)
606 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
607 }
608
609 kfree(pd);
610 return 0;
611}
612
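/*
 * Allocate a PD. For user contexts a DPI (doorbell page) is allocated on the
 * first PD and reused afterwards; its id and doorbell address are returned
 * through udata. Kernel PDs additionally get a fence MR/MW.
 */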
613struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
614 struct ib_ucontext *ucontext,
615 struct ib_udata *udata)
616{
617 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
618 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
619 struct bnxt_re_ucontext,
620 ib_uctx);
621 struct bnxt_re_pd *pd;
622 int rc;
623
624 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
625 if (!pd)
626 return ERR_PTR(-ENOMEM);
627
628 pd->rdev = rdev;
629 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
630 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
631 rc = -ENOMEM;
632 goto fail;
633 }
634
635 if (udata) {
636 struct bnxt_re_pd_resp resp;
637
638 if (!ucntx->dpi.dbr) {
639 /* Allocate DPI in alloc_pd to avoid failures in
640 * ibv_devinfo and related applications when DPIs
641 * are depleted.
642 */
643 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
644 &ucntx->dpi, ucntx)) {
645 rc = -ENOMEM;
646 goto dbfail;
647 }
648 }
649
650 resp.pdid = pd->qplib_pd.id;
651 /* Still allow mapping this DBR to the new user PD. */
652 resp.dpi = ucntx->dpi.dpi;
653 resp.dbr = (u64)ucntx->dpi.umdbr;
654
655 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
656 if (rc) {
657 dev_err(rdev_to_dev(rdev),
658 "Failed to copy user response\n");
659 goto dbfail;
660 }
661 }
662
663 if (!udata)
664 if (bnxt_re_create_fence_mr(pd))
665 dev_warn(rdev_to_dev(rdev),
666 "Failed to create Fence-MR\n");
667 return &pd->ib_pd;
668dbfail:
669 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
670 &pd->qplib_pd);
671fail:
672 kfree(pd);
673 return ERR_PTR(rc);
674}
675
676/* Address Handles */
677int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
678{
679 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
680 struct bnxt_re_dev *rdev = ah->rdev;
681 int rc;
682
683 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
684 if (rc) {
685 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
686 return rc;
687 }
688 kfree(ah);
689 return 0;
690}
691
692struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
693 struct rdma_ah_attr *ah_attr,
694 struct ib_udata *udata)
695{
696 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
697 struct bnxt_re_dev *rdev = pd->rdev;
698 struct bnxt_re_ah *ah;
699 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
700 int rc;
701 u16 vlan_tag;
702 u8 nw_type;
703
704 struct ib_gid_attr sgid_attr;
705
706 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
707 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
708 return ERR_PTR(-EINVAL);
709 }
710 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
711 if (!ah)
712 return ERR_PTR(-ENOMEM);
713
714 ah->rdev = rdev;
715 ah->qplib_ah.pd = &pd->qplib_pd;
716
717 /* Supply the configuration for the HW */
718 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
719 sizeof(union ib_gid));
720 /*
721 * If RoCE V2 is enabled, stack will have two entries for
722 * each GID entry. Avoiding this duplicate entry in HW. Dividing
723 * the GID index by 2 for RoCE V2
724 */
725 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
726 ah->qplib_ah.host_sgid_index = grh->sgid_index;
727 ah->qplib_ah.traffic_class = grh->traffic_class;
728 ah->qplib_ah.flow_label = grh->flow_label;
729 ah->qplib_ah.hop_limit = grh->hop_limit;
730 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
731 if (ib_pd->uobject &&
732 !rdma_is_multicast_addr((struct in6_addr *)
733 grh->dgid.raw) &&
734 !rdma_link_local_addr((struct in6_addr *)
735 grh->dgid.raw)) {
736 union ib_gid sgid;
737
738 rc = ib_get_cached_gid(&rdev->ibdev, 1,
739 grh->sgid_index, &sgid,
740 &sgid_attr);
741 if (rc) {
742 dev_err(rdev_to_dev(rdev),
743 "Failed to query gid at index %d",
744 grh->sgid_index);
745 goto fail;
746 }
747 if (sgid_attr.ndev) {
748 if (is_vlan_dev(sgid_attr.ndev))
749 vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
750 dev_put(sgid_attr.ndev);
751 }
752 /* Get network header type for this GID */
753 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
754 switch (nw_type) {
755 case RDMA_NETWORK_IPV4:
756 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
757 break;
758 case RDMA_NETWORK_IPV6:
759 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
760 break;
761 default:
762 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
763 break;
764 }
765 rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
766 ah_attr->roce.dmac, &vlan_tag,
767 &sgid_attr.ndev->ifindex,
768 NULL);
769 if (rc) {
770 dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
771 goto fail;
772 }
773 }
774
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400775 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800776 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
777 if (rc) {
778 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
779 goto fail;
780 }
781
782 /* Write AVID to shared page. */
783 if (ib_pd->uobject) {
784 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
785 struct bnxt_re_ucontext *uctx;
786 unsigned long flag;
787 u32 *wrptr;
788
789 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
790 spin_lock_irqsave(&uctx->sh_lock, flag);
791 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
792 *wrptr = ah->qplib_ah.id;
793 wmb(); /* make sure cache is updated. */
794 spin_unlock_irqrestore(&uctx->sh_lock, flag);
795 }
796
797 return &ah->ib_ah;
798
799fail:
800 kfree(ah);
801 return ERR_PTR(rc);
802}
803
804int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
805{
806 return 0;
807}
808
809int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
810{
811 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
812
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400813 ah_attr->type = ib_ah->type;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400814 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400815 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -0400816 rdma_ah_set_grh(ah_attr, NULL, 0,
817 ah->qplib_ah.host_sgid_index,
818 0, ah->qplib_ah.traffic_class);
819 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
820 rdma_ah_set_port_num(ah_attr, 1);
821 rdma_ah_set_static_rate(ah_attr, 0);
822 return 0;
823}
824
825/* Queue Pairs */
826int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
827{
828 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
829 struct bnxt_re_dev *rdev = qp->rdev;
830 int rc;
831
832 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
833 if (rc) {
834 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
835 return rc;
836 }
837 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
838 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
839 &rdev->sqp_ah->qplib_ah);
840 if (rc) {
841 dev_err(rdev_to_dev(rdev),
842 "Failed to destroy HW AH for shadow QP");
843 return rc;
844 }
845
846 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
847 &rdev->qp1_sqp->qplib_qp);
848 if (rc) {
849 dev_err(rdev_to_dev(rdev),
850 "Failed to destroy Shadow QP");
851 return rc;
852 }
853 mutex_lock(&rdev->qp_lock);
854 list_del(&rdev->qp1_sqp->list);
855 atomic_dec(&rdev->qp_count);
856 mutex_unlock(&rdev->qp_lock);
857
858 kfree(rdev->sqp_ah);
859 kfree(rdev->qp1_sqp);
860 }
861
862 if (!IS_ERR_OR_NULL(qp->rumem))
863 ib_umem_release(qp->rumem);
864 if (!IS_ERR_OR_NULL(qp->sumem))
865 ib_umem_release(qp->sumem);
866
867 mutex_lock(&rdev->qp_lock);
868 list_del(&qp->list);
869 atomic_dec(&rdev->qp_count);
870 mutex_unlock(&rdev->qp_lock);
871 kfree(qp);
872 return 0;
873}
874
875static u8 __from_ib_qp_type(enum ib_qp_type type)
876{
877 switch (type) {
878 case IB_QPT_GSI:
879 return CMDQ_CREATE_QP1_TYPE_GSI;
880 case IB_QPT_RC:
881 return CMDQ_CREATE_QP_TYPE_RC;
882 case IB_QPT_UD:
883 return CMDQ_CREATE_QP_TYPE_UD;
884 default:
885 return IB_QPT_MAX;
886 }
887}
888
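/*
 * Pin and map the user-space SQ/RQ rings (plus the PSN search area for RC
 * QPs) with ib_umem and hook them into the qplib queue descriptors.
 */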
889static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
890 struct bnxt_re_qp *qp, struct ib_udata *udata)
891{
892 struct bnxt_re_qp_req ureq;
893 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
894 struct ib_umem *umem;
895 int bytes = 0;
896 struct ib_ucontext *context = pd->ib_pd.uobject->context;
897 struct bnxt_re_ucontext *cntx = container_of(context,
898 struct bnxt_re_ucontext,
899 ib_uctx);
900 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
901 return -EFAULT;
902
903 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
904 /* Consider mapping PSN search memory only for RC QPs. */
905 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
906 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
907 bytes = PAGE_ALIGN(bytes);
908 umem = ib_umem_get(context, ureq.qpsva, bytes,
909 IB_ACCESS_LOCAL_WRITE, 1);
910 if (IS_ERR(umem))
911 return PTR_ERR(umem);
912
913 qp->sumem = umem;
914 qplib_qp->sq.sglist = umem->sg_head.sgl;
915 qplib_qp->sq.nmap = umem->nmap;
916 qplib_qp->qp_handle = ureq.qp_handle;
917
918 if (!qp->qplib_qp.srq) {
919 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
920 bytes = PAGE_ALIGN(bytes);
921 umem = ib_umem_get(context, ureq.qprva, bytes,
922 IB_ACCESS_LOCAL_WRITE, 1);
923 if (IS_ERR(umem))
924 goto rqfail;
925 qp->rumem = umem;
926 qplib_qp->rq.sglist = umem->sg_head.sgl;
927 qplib_qp->rq.nmap = umem->nmap;
928 }
929
930 qplib_qp->dpi = &cntx->dpi;
931 return 0;
932rqfail:
933 ib_umem_release(qp->sumem);
934 qp->sumem = NULL;
935 qplib_qp->sq.sglist = NULL;
936 qplib_qp->sq.nmap = 0;
937
938 return PTR_ERR(umem);
939}
940
941static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
942 (struct bnxt_re_pd *pd,
943 struct bnxt_qplib_res *qp1_res,
944 struct bnxt_qplib_qp *qp1_qp)
945{
946 struct bnxt_re_dev *rdev = pd->rdev;
947 struct bnxt_re_ah *ah;
948 union ib_gid sgid;
949 int rc;
950
951 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
952 if (!ah)
953 return NULL;
954
955 memset(ah, 0, sizeof(*ah));
956 ah->rdev = rdev;
957 ah->qplib_ah.pd = &pd->qplib_pd;
958
959 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
960 if (rc)
961 goto fail;
962
963 /* supply the dgid data same as sgid */
964 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
965 sizeof(union ib_gid));
966 ah->qplib_ah.sgid_index = 0;
967
968 ah->qplib_ah.traffic_class = 0;
969 ah->qplib_ah.flow_label = 0;
970 ah->qplib_ah.hop_limit = 1;
971 ah->qplib_ah.sl = 0;
972 /* Have DMAC same as SMAC */
973 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
974
975 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
976 if (rc) {
977 dev_err(rdev_to_dev(rdev),
978 "Failed to allocate HW AH for Shadow QP");
979 goto fail;
980 }
981
982 return ah;
983
984fail:
985 kfree(ah);
986 return NULL;
987}
988
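/*
 * The shadow QP is a kernel UD QP used to handle QP1 (GSI) traffic; its queue
 * depths mirror QP1's RQ and it shares QP1's send/receive CQs.
 */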
989static struct bnxt_re_qp *bnxt_re_create_shadow_qp
990 (struct bnxt_re_pd *pd,
991 struct bnxt_qplib_res *qp1_res,
992 struct bnxt_qplib_qp *qp1_qp)
993{
994 struct bnxt_re_dev *rdev = pd->rdev;
995 struct bnxt_re_qp *qp;
996 int rc;
997
998 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
999 if (!qp)
1000 return NULL;
1001
1002 memset(qp, 0, sizeof(*qp));
1003 qp->rdev = rdev;
1004
1005 /* Initialize the shadow QP structure from the QP1 values */
1006 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1007
1008 qp->qplib_qp.pd = &pd->qplib_pd;
1009 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1010 qp->qplib_qp.type = IB_QPT_UD;
1011
1012 qp->qplib_qp.max_inline_data = 0;
1013 qp->qplib_qp.sig_type = true;
1014
1015 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1016 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1017 qp->qplib_qp.sq.max_sge = 2;
1018 /* Q full delta can be 1 since it is internal QP */
1019 qp->qplib_qp.sq.q_full_delta = 1;
1020
1021 qp->qplib_qp.scq = qp1_qp->scq;
1022 qp->qplib_qp.rcq = qp1_qp->rcq;
1023
1024 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1025 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1026 /* Q full delta can be 1 since it is internal QP */
1027 qp->qplib_qp.rq.q_full_delta = 1;
1028
1029 qp->qplib_qp.mtu = qp1_qp->mtu;
1030
1031 qp->qplib_qp.sq_hdr_buf_size = 0;
1032 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1033 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1034
1035 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1036 if (rc)
1037 goto fail;
1038
1039 rdev->sqp_id = qp->qplib_qp.id;
1040
1041 spin_lock_init(&qp->sq_lock);
1042 INIT_LIST_HEAD(&qp->list);
1043 mutex_lock(&rdev->qp_lock);
1044 list_add_tail(&qp->list, &rdev->qp_list);
1045 atomic_inc(&rdev->qp_count);
1046 mutex_unlock(&rdev->qp_lock);
1047 return qp;
1048fail:
1049 kfree(qp);
1050 return NULL;
1051}
1052
1053struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1054 struct ib_qp_init_attr *qp_init_attr,
1055 struct ib_udata *udata)
1056{
1057 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1058 struct bnxt_re_dev *rdev = pd->rdev;
1059 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1060 struct bnxt_re_qp *qp;
1061 struct bnxt_re_cq *cq;
1062 int rc, entries;
1063
1064 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1065 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1066 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1067 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1068 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1069 return ERR_PTR(-EINVAL);
1070
1071 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1072 if (!qp)
1073 return ERR_PTR(-ENOMEM);
1074
1075 qp->rdev = rdev;
1076 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1077 qp->qplib_qp.pd = &pd->qplib_pd;
1078 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1079 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1080 if (qp->qplib_qp.type == IB_QPT_MAX) {
1081 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1082 qp->qplib_qp.type);
1083 rc = -EINVAL;
1084 goto fail;
1085 }
1086 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1087 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1088 IB_SIGNAL_ALL_WR) ? true : false);
1089
1090 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1091 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1092 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1093
1094 if (qp_init_attr->send_cq) {
1095 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1096 ib_cq);
1097 if (!cq) {
1098 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1099 rc = -EINVAL;
1100 goto fail;
1101 }
1102 qp->qplib_qp.scq = &cq->qplib_cq;
1103 }
1104
1105 if (qp_init_attr->recv_cq) {
1106 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1107 ib_cq);
1108 if (!cq) {
1109 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1110 rc = -EINVAL;
1111 goto fail;
1112 }
1113 qp->qplib_qp.rcq = &cq->qplib_cq;
1114 }
1115
1116 if (qp_init_attr->srq) {
1117 dev_err(rdev_to_dev(rdev), "SRQ not supported");
1118 rc = -ENOTSUPP;
1119 goto fail;
1120 } else {
1121 /* Allocate 1 more than what's provided so posting max doesn't
1122 * mean empty
1123 */
1124 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1125 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1126 dev_attr->max_qp_wqes + 1);
1127
1128 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1129 qp_init_attr->cap.max_recv_wr;
1130
1131 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1132 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1133 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1134 }
1135
1136 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1137
1138 if (qp_init_attr->qp_type == IB_QPT_GSI) {
1139 /* Allocate 1 more than what's provided */
1140 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1141 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1142 dev_attr->max_qp_wqes + 1);
1143 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1144 qp_init_attr->cap.max_send_wr;
1145 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1146 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1147 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1148 qp->qplib_qp.sq.max_sge++;
1149 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1150 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1151
1152 qp->qplib_qp.rq_hdr_buf_size =
1153 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1154
1155 qp->qplib_qp.sq_hdr_buf_size =
1156 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1157 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1158 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1159 if (rc) {
1160 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1161 goto fail;
1162 }
1163 /* Create a shadow QP to handle the QP1 traffic */
1164 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1165 &qp->qplib_qp);
1166 if (!rdev->qp1_sqp) {
1167 rc = -EINVAL;
1168 dev_err(rdev_to_dev(rdev),
1169 "Failed to create Shadow QP for QP1");
1170 goto qp_destroy;
1171 }
1172 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1173 &qp->qplib_qp);
1174 if (!rdev->sqp_ah) {
1175 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1176 &rdev->qp1_sqp->qplib_qp);
1177 rc = -EINVAL;
1178 dev_err(rdev_to_dev(rdev),
1179 "Failed to create AH entry for ShadowQP");
1180 goto qp_destroy;
1181 }
1182
1183 } else {
1184 /* Allocate 128 + 1 more than what's provided */
1185 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1186 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1187 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1188 dev_attr->max_qp_wqes +
1189 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1190 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1191
1192 /*
1193 * Reserve one slot for the Phantom WQE. The application can
1194 * post one extra entry in this case; allowing this avoids an
1195 * unexpected Queue full condition
1196 */
1197
1198 qp->qplib_qp.sq.q_full_delta -= 1;
1199
1200 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1201 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1202 if (udata) {
1203 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1204 if (rc)
1205 goto fail;
1206 } else {
1207 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1208 }
1209
1210 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1211 if (rc) {
1212 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1213 goto fail;
1214 }
1215 }
1216
1217 qp->ib_qp.qp_num = qp->qplib_qp.id;
1218 spin_lock_init(&qp->sq_lock);
1219 spin_lock_init(&qp->rq_lock);
1220
1221 if (udata) {
1222 struct bnxt_re_qp_resp resp;
1223
1224 resp.qpid = qp->ib_qp.qp_num;
1225 resp.rsvd = 0;
1226 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1227 if (rc) {
1228 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1229 goto qp_destroy;
1230 }
1231 }
1232 INIT_LIST_HEAD(&qp->list);
1233 mutex_lock(&rdev->qp_lock);
1234 list_add_tail(&qp->list, &rdev->qp_list);
1235 atomic_inc(&rdev->qp_count);
1236 mutex_unlock(&rdev->qp_lock);
1237
1238 return &qp->ib_qp;
1239qp_destroy:
1240 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1241fail:
1242 kfree(qp);
1243 return ERR_PTR(rc);
1244}
1245
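/* Translation helpers between IB verbs QP state / path MTU enums and the CMDQ encodings. */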
1246static u8 __from_ib_qp_state(enum ib_qp_state state)
1247{
1248 switch (state) {
1249 case IB_QPS_RESET:
1250 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1251 case IB_QPS_INIT:
1252 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1253 case IB_QPS_RTR:
1254 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1255 case IB_QPS_RTS:
1256 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1257 case IB_QPS_SQD:
1258 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1259 case IB_QPS_SQE:
1260 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1261 case IB_QPS_ERR:
1262 default:
1263 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1264 }
1265}
1266
1267static enum ib_qp_state __to_ib_qp_state(u8 state)
1268{
1269 switch (state) {
1270 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1271 return IB_QPS_RESET;
1272 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1273 return IB_QPS_INIT;
1274 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1275 return IB_QPS_RTR;
1276 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1277 return IB_QPS_RTS;
1278 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1279 return IB_QPS_SQD;
1280 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1281 return IB_QPS_SQE;
1282 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1283 default:
1284 return IB_QPS_ERR;
1285 }
1286}
1287
1288static u32 __from_ib_mtu(enum ib_mtu mtu)
1289{
1290 switch (mtu) {
1291 case IB_MTU_256:
1292 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1293 case IB_MTU_512:
1294 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1295 case IB_MTU_1024:
1296 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1297 case IB_MTU_2048:
1298 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1299 case IB_MTU_4096:
1300 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1301 default:
1302 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1303 }
1304}
1305
1306static enum ib_mtu __to_ib_mtu(u32 mtu)
1307{
1308 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1309 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1310 return IB_MTU_256;
1311 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1312 return IB_MTU_512;
1313 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1314 return IB_MTU_1024;
1315 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1316 return IB_MTU_2048;
1317 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1318 return IB_MTU_4096;
1319 default:
1320 return IB_MTU_2048;
1321 }
1322}
1323
1324static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1325 struct bnxt_re_qp *qp1_qp,
1326 int qp_attr_mask)
1327{
1328 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1329 int rc = 0;
1330
1331 if (qp_attr_mask & IB_QP_STATE) {
1332 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1333 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1334 }
1335 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1336 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1337 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1338 }
1339
1340 if (qp_attr_mask & IB_QP_QKEY) {
1341 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1342 /* Using a Random QKEY */
1343 qp->qplib_qp.qkey = 0x81818181;
1344 }
1345 if (qp_attr_mask & IB_QP_SQ_PSN) {
1346 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1347 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1348 }
1349
1350 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1351 if (rc)
1352 dev_err(rdev_to_dev(rdev),
1353 "Failed to modify Shadow QP for QP1");
1354 return rc;
1355}
1356
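/*
 * Translate the ib_qp_attr mask into qplib modify flags and issue the
 * firmware MODIFY_QP; for the GSI QP the relevant changes are also applied
 * to the shadow QP.
 */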
1357int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1358 int qp_attr_mask, struct ib_udata *udata)
1359{
1360 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1361 struct bnxt_re_dev *rdev = qp->rdev;
1362 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1363 enum ib_qp_state curr_qp_state, new_qp_state;
1364 int rc, entries;
1365 int status;
1366 union ib_gid sgid;
1367 struct ib_gid_attr sgid_attr;
1368 u8 nw_type;
1369
1370 qp->qplib_qp.modify_flags = 0;
1371 if (qp_attr_mask & IB_QP_STATE) {
1372 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1373 new_qp_state = qp_attr->qp_state;
1374 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1375 ib_qp->qp_type, qp_attr_mask,
1376 IB_LINK_LAYER_ETHERNET)) {
1377 dev_err(rdev_to_dev(rdev),
1378 "Invalid attribute mask: %#x specified ",
1379 qp_attr_mask);
1380 dev_err(rdev_to_dev(rdev),
1381 "for qpn: %#x type: %#x",
1382 ib_qp->qp_num, ib_qp->qp_type);
1383 dev_err(rdev_to_dev(rdev),
1384 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1385 curr_qp_state, new_qp_state);
1386 return -EINVAL;
1387 }
1388 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1389 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1390 }
1391 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1392 qp->qplib_qp.modify_flags |=
1393 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1394 qp->qplib_qp.en_sqd_async_notify = true;
1395 }
1396 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1397 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1398 qp->qplib_qp.access =
1399 __from_ib_access_flags(qp_attr->qp_access_flags);
1400 /* LOCAL_WRITE access must be set to allow RC receive */
1401 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1402 }
1403 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1404 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1405 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1406 }
1407 if (qp_attr_mask & IB_QP_QKEY) {
1408 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1409 qp->qplib_qp.qkey = qp_attr->qkey;
1410 }
1411 if (qp_attr_mask & IB_QP_AV) {
1412 const struct ib_global_route *grh =
1413 rdma_ah_read_grh(&qp_attr->ah_attr);
1414
1415 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1416 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1417 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1418 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1419 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1420 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1421 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1422 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1423 sizeof(qp->qplib_qp.ah.dgid.data));
1424 qp->qplib_qp.ah.flow_label = grh->flow_label;
1425 /* If RoCE V2 is enabled, stack will have two entries for
1426 * each GID entry. Avoiding this duplicate entry in HW. Dividing
1427 * the GID index by 2 for RoCE V2
1428 */
1429 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1430 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1431 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1432 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1433 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001434 ether_addr_copy(qp->qplib_qp.ah.dmac,
1435 qp_attr->ah_attr.roce.dmac);
1436
1437 status = ib_get_cached_gid(&rdev->ibdev, 1,
1438 grh->sgid_index,
1439 &sgid, &sgid_attr);
1440 if (!status && sgid_attr.ndev) {
1441 memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1442 ETH_ALEN);
1443 dev_put(sgid_attr.ndev);
1444 nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1445 &sgid);
1446 switch (nw_type) {
1447 case RDMA_NETWORK_IPV4:
1448 qp->qplib_qp.nw_type =
1449 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1450 break;
1451 case RDMA_NETWORK_IPV6:
1452 qp->qplib_qp.nw_type =
1453 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1454 break;
1455 default:
1456 qp->qplib_qp.nw_type =
1457 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1458 break;
1459 }
1460 }
1461 }
1462
1463 if (qp_attr_mask & IB_QP_PATH_MTU) {
1464 qp->qplib_qp.modify_flags |=
1465 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1466 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1467 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1468 qp->qplib_qp.modify_flags |=
1469 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1470 qp->qplib_qp.path_mtu =
1471 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1472 }
1473
1474 if (qp_attr_mask & IB_QP_TIMEOUT) {
1475 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1476 qp->qplib_qp.timeout = qp_attr->timeout;
1477 }
1478 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1479 qp->qplib_qp.modify_flags |=
1480 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1481 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1482 }
1483 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1484 qp->qplib_qp.modify_flags |=
1485 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1486 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1487 }
1488 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1489 qp->qplib_qp.modify_flags |=
1490 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1491 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1492 }
1493 if (qp_attr_mask & IB_QP_RQ_PSN) {
1494 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1495 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1496 }
1497 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1498 qp->qplib_qp.modify_flags |=
1499 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1500 /* Cap the max_rd_atomic to device max */
1501 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1502 dev_attr->max_qp_rd_atom);
1503 }
1504 if (qp_attr_mask & IB_QP_SQ_PSN) {
1505 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1506 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1507 }
1508 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1509 if (qp_attr->max_dest_rd_atomic >
1510 dev_attr->max_qp_init_rd_atom) {
1511 dev_err(rdev_to_dev(rdev),
1512 "max_dest_rd_atomic requested%d is > dev_max%d",
1513 qp_attr->max_dest_rd_atomic,
1514 dev_attr->max_qp_init_rd_atom);
1515 return -EINVAL;
1516 }
1517
1518 qp->qplib_qp.modify_flags |=
1519 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1520 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1521 }
1522 if (qp_attr_mask & IB_QP_CAP) {
1523 qp->qplib_qp.modify_flags |=
1524 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1525 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1526 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1527 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1528 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1529 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1530 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1531 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1532 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1533 (qp_attr->cap.max_inline_data >=
1534 dev_attr->max_inline_data)) {
1535 dev_err(rdev_to_dev(rdev),
1536 "Create QP failed - max exceeded");
1537 return -EINVAL;
1538 }
1539 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1540 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1541 dev_attr->max_qp_wqes + 1);
1542 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1543 qp_attr->cap.max_send_wr;
1544 /*
1545 * Reserve one slot for the Phantom WQE. Some applications can
1546 * post one extra entry in this case; allowing this avoids an
1547 * unexpected Queue full condition
1548 */
1549 qp->qplib_qp.sq.q_full_delta -= 1;
1550 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1551 if (qp->qplib_qp.rq.max_wqe) {
1552 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1553 qp->qplib_qp.rq.max_wqe =
1554 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1555 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1556 qp_attr->cap.max_recv_wr;
1557 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1558 } else {
1559 /* SRQ was used prior, just ignore the RQ caps */
1560 }
1561 }
1562 if (qp_attr_mask & IB_QP_DEST_QPN) {
1563 qp->qplib_qp.modify_flags |=
1564 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1565 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1566 }
1567 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1568 if (rc) {
1569 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1570 return rc;
1571 }
1572 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1573 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1574 return rc;
1575}
1576
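/* Query the QP state from firmware and translate the result back into ib_qp_attr. */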
1577int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1578 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1579{
1580 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1581 struct bnxt_re_dev *rdev = qp->rdev;
1582 struct bnxt_qplib_qp qplib_qp;
1583 int rc;
1584
1585 memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
1586 qplib_qp.id = qp->qplib_qp.id;
1587 qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1588
1589 rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
1590 if (rc) {
1591 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1592 return rc;
1593 }
1594 qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
1595 qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
1596 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
1597 qp_attr->pkey_index = qplib_qp.pkey_index;
1598 qp_attr->qkey = qplib_qp.qkey;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001599 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001600 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
1601 qplib_qp.ah.host_sgid_index,
1602 qplib_qp.ah.hop_limit,
1603 qplib_qp.ah.traffic_class);
1604 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
1605 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001606 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001607 qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
1608 qp_attr->timeout = qplib_qp.timeout;
1609 qp_attr->retry_cnt = qplib_qp.retry_cnt;
1610 qp_attr->rnr_retry = qplib_qp.rnr_retry;
1611 qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
1612 qp_attr->rq_psn = qplib_qp.rq.psn;
1613 qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
1614 qp_attr->sq_psn = qplib_qp.sq.psn;
1615 qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
1616 qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
1617 IB_SIGNAL_REQ_WR;
1618 qp_attr->dest_qp_num = qplib_qp.dest_qpn;
1619
1620 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1621 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1622 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1623 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1624 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1625 qp_init_attr->cap = qp_attr->cap;
1626
1627 return 0;
1628}
1629
1630/* Routine for sending QP1 packets for RoCE V1 and V2
1631 */
1632static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1633 struct ib_send_wr *wr,
1634 struct bnxt_qplib_swqe *wqe,
1635 int payload_size)
1636{
1637 struct ib_device *ibdev = &qp->rdev->ibdev;
1638 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1639 ib_ah);
1640 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1641 struct bnxt_qplib_sge sge;
1642 union ib_gid sgid;
1643 u8 nw_type;
1644 u16 ether_type;
1645 struct ib_gid_attr sgid_attr;
1646 union ib_gid dgid;
1647 bool is_eth = false;
1648 bool is_vlan = false;
1649 bool is_grh = false;
1650 bool is_udp = false;
1651 u8 ip_version = 0;
1652 u16 vlan_id = 0xFFFF;
1653 void *buf;
1654 int i, rc = 0, size;
1655
1656 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1657
1658 rc = ib_get_cached_gid(ibdev, 1,
1659 qplib_ah->host_sgid_index, &sgid,
1660 &sgid_attr);
1661 if (rc) {
1662 dev_err(rdev_to_dev(qp->rdev),
1663 "Failed to query gid at index %d",
1664 qplib_ah->host_sgid_index);
1665 return rc;
1666 }
1667 if (sgid_attr.ndev) {
1668 if (is_vlan_dev(sgid_attr.ndev))
1669 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1670 dev_put(sgid_attr.ndev);
1671 }
1672 /* Get network header type for this GID */
1673 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1674 switch (nw_type) {
1675 case RDMA_NETWORK_IPV4:
1676 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1677 break;
1678 case RDMA_NETWORK_IPV6:
1679 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1680 break;
1681 default:
1682 nw_type = BNXT_RE_ROCE_V1_PACKET;
1683 break;
1684 }
1685 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1686 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
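	/*
	 * RoCE v2 GIDs use UDP encapsulation: the GRH is replaced by an
	 * IPv4/IPv6 header plus UDP, so is_grh is only set for RoCE v1
	 * (ETH_P_IBOE) frames.
	 */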
1687 if (is_udp) {
1688 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1689 ip_version = 4;
1690 ether_type = ETH_P_IP;
1691 } else {
1692 ip_version = 6;
1693 ether_type = ETH_P_IPV6;
1694 }
1695 is_grh = false;
1696 } else {
1697 ether_type = ETH_P_IBOE;
1698 is_grh = true;
1699 }
1700
1701 is_eth = true;
      1702 	is_vlan = (vlan_id && vlan_id < 0x1000);
1703
1704 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1705 ip_version, is_udp, 0, &qp->qp1_hdr);
1706
1707 /* ETH */
1708 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1709 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1710
1711 /* For vlan, check the sgid for vlan existence */
1712
1713 if (!is_vlan) {
1714 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1715 } else {
1716 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1717 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1718 }
1719
1720 if (is_grh || (ip_version == 6)) {
1721 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1722 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1723 sizeof(sgid));
1724 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1725 }
1726
1727 if (ip_version == 4) {
1728 qp->qp1_hdr.ip4.tos = 0;
1729 qp->qp1_hdr.ip4.id = 0;
1730 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1731 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1732
1733 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1734 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1735 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1736 }
1737
1738 if (is_udp) {
1739 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1740 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1741 qp->qp1_hdr.udp.csum = 0;
1742 }
1743
1744 /* BTH */
1745 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1746 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1747 qp->qp1_hdr.immediate_present = 1;
1748 } else {
1749 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1750 }
1751 if (wr->send_flags & IB_SEND_SOLICITED)
1752 qp->qp1_hdr.bth.solicited_event = 1;
      1753 	/* pad_count: pad the payload out to a 4-byte boundary */
1754 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1755
1756 /* P_key for QP1 is for all members */
1757 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1758 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1759 qp->qp1_hdr.bth.ack_req = 0;
1760 qp->send_psn++;
1761 qp->send_psn &= BTH_PSN_MASK;
1762 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1763 /* DETH */
      1764 	/* Use the privileged Q_Key for QP1 */
1765 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1766 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1767
1768 /* Pack the QP1 to the transmit buffer */
1769 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1770 if (buf) {
1771 size = ib_ud_header_pack(&qp->qp1_hdr, buf);
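		/*
		 * Shift the caller's SGEs up by one so that sg_list[0] can
		 * carry the freshly packed QP1 header from the header buffer.
		 */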
1772 for (i = wqe->num_sge; i; i--) {
1773 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1774 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1775 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1776 }
1777
1778 /*
1779 * Max Header buf size for IPV6 RoCE V2 is 86,
1780 * which is same as the QP1 SQ header buffer.
1781 * Header buf size for IPV4 RoCE V2 can be 66.
1782 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
1783 * Subtract 20 bytes from QP1 SQ header buf size
1784 */
1785 if (is_udp && ip_version == 4)
1786 sge.size -= 20;
1787 /*
1788 * Max Header buf size for RoCE V1 is 78.
1789 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1790 * Subtract 8 bytes from QP1 SQ header buf size
1791 */
1792 if (!is_udp)
1793 sge.size -= 8;
1794
1795 /* Subtract 4 bytes for non vlan packets */
1796 if (!is_vlan)
1797 sge.size -= 4;
1798
1799 wqe->sg_list[0].addr = sge.addr;
1800 wqe->sg_list[0].lkey = sge.lkey;
1801 wqe->sg_list[0].size = sge.size;
1802 wqe->num_sge++;
1803
1804 } else {
1805 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1806 rc = -ENOMEM;
1807 }
1808 return rc;
1809}
1810
1811/* For the MAD layer, it only provides the recv SGE the size of
1812 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
1813 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire
1814 * receive packet (334 bytes) with no VLAN and then copy the GRH
1815 * and the MAD datagram out to the provided SGE.
1816 */
1817static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1818 struct ib_recv_wr *wr,
1819 struct bnxt_qplib_swqe *wqe,
1820 int payload_size)
1821{
1822 struct bnxt_qplib_sge ref, sge;
1823 u32 rq_prod_index;
1824 struct bnxt_re_sqp_entries *sqp_entry;
1825
1826 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1827
1828 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1829 return -ENOMEM;
1830
1831 /* Create 1 SGE to receive the entire
1832 * ethernet packet
1833 */
1834 /* Save the reference from ULP */
1835 ref.addr = wqe->sg_list[0].addr;
1836 ref.lkey = wqe->sg_list[0].lkey;
1837 ref.size = wqe->sg_list[0].size;
1838
1839 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1840
1841 /* SGE 1 */
1842 wqe->sg_list[0].addr = sge.addr;
1843 wqe->sg_list[0].lkey = sge.lkey;
1844 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1845 sge.size -= wqe->sg_list[0].size;
1846
1847 sqp_entry->sge.addr = ref.addr;
1848 sqp_entry->sge.lkey = ref.lkey;
1849 sqp_entry->sge.size = ref.size;
1850 /* Store the wrid for reporting completion */
1851 sqp_entry->wrid = wqe->wr_id;
1852 /* change the wqe->wrid to table index */
1853 wqe->wr_id = rq_prod_index;
1854 return 0;
1855}
1856
1857static int is_ud_qp(struct bnxt_re_qp *qp)
1858{
1859 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1860}
1861
1862static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1863 struct ib_send_wr *wr,
1864 struct bnxt_qplib_swqe *wqe)
1865{
1866 struct bnxt_re_ah *ah = NULL;
1867
1868 if (is_ud_qp(qp)) {
1869 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1870 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1871 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1872 wqe->send.avid = ah->qplib_ah.id;
1873 }
1874 switch (wr->opcode) {
1875 case IB_WR_SEND:
1876 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1877 break;
1878 case IB_WR_SEND_WITH_IMM:
1879 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1880 wqe->send.imm_data = wr->ex.imm_data;
1881 break;
1882 case IB_WR_SEND_WITH_INV:
1883 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1884 wqe->send.inv_key = wr->ex.invalidate_rkey;
1885 break;
1886 default:
1887 return -EINVAL;
1888 }
1889 if (wr->send_flags & IB_SEND_SIGNALED)
1890 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1891 if (wr->send_flags & IB_SEND_FENCE)
1892 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1893 if (wr->send_flags & IB_SEND_SOLICITED)
1894 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1895 if (wr->send_flags & IB_SEND_INLINE)
1896 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1897
1898 return 0;
1899}
1900
1901static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1902 struct bnxt_qplib_swqe *wqe)
1903{
1904 switch (wr->opcode) {
1905 case IB_WR_RDMA_WRITE:
1906 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1907 break;
1908 case IB_WR_RDMA_WRITE_WITH_IMM:
1909 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1910 wqe->rdma.imm_data = wr->ex.imm_data;
1911 break;
1912 case IB_WR_RDMA_READ:
1913 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1914 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1915 break;
1916 default:
1917 return -EINVAL;
1918 }
1919 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1920 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1921 if (wr->send_flags & IB_SEND_SIGNALED)
1922 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1923 if (wr->send_flags & IB_SEND_FENCE)
1924 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1925 if (wr->send_flags & IB_SEND_SOLICITED)
1926 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1927 if (wr->send_flags & IB_SEND_INLINE)
1928 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1929
1930 return 0;
1931}
1932
1933static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1934 struct bnxt_qplib_swqe *wqe)
1935{
1936 switch (wr->opcode) {
1937 case IB_WR_ATOMIC_CMP_AND_SWP:
1938 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
1939 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1940 break;
1941 case IB_WR_ATOMIC_FETCH_AND_ADD:
1942 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1943 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1944 break;
1945 default:
1946 return -EINVAL;
1947 }
1948 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1949 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1950 if (wr->send_flags & IB_SEND_SIGNALED)
1951 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1952 if (wr->send_flags & IB_SEND_FENCE)
1953 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1954 if (wr->send_flags & IB_SEND_SOLICITED)
1955 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1956 return 0;
1957}
1958
1959static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1960 struct bnxt_qplib_swqe *wqe)
1961{
1962 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1963 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1964
1965 if (wr->send_flags & IB_SEND_SIGNALED)
1966 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1967 if (wr->send_flags & IB_SEND_FENCE)
1968 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1969 if (wr->send_flags & IB_SEND_SOLICITED)
1970 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1971
1972 return 0;
1973}
1974
1975static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1976 struct bnxt_qplib_swqe *wqe)
1977{
1978 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1979 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1980 int access = wr->access;
1981
1982 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1983 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1984 wqe->frmr.page_list = mr->pages;
1985 wqe->frmr.page_list_len = mr->npages;
1986 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1987 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1988
1989 if (wr->wr.send_flags & IB_SEND_FENCE)
1990 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1991 if (wr->wr.send_flags & IB_SEND_SIGNALED)
1992 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1993
1994 if (access & IB_ACCESS_LOCAL_WRITE)
1995 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1996 if (access & IB_ACCESS_REMOTE_READ)
1997 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
1998 if (access & IB_ACCESS_REMOTE_WRITE)
1999 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2000 if (access & IB_ACCESS_REMOTE_ATOMIC)
2001 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2002 if (access & IB_ACCESS_MW_BIND)
2003 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2004
2005 wqe->frmr.l_key = wr->key;
2006 wqe->frmr.length = wr->mr->length;
2007 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2008 wqe->frmr.va = wr->mr->iova;
2009 return 0;
2010}
2011
2012static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2013 struct ib_send_wr *wr,
2014 struct bnxt_qplib_swqe *wqe)
2015{
2016 /* Copy the inline data to the data field */
2017 u8 *in_data;
2018 u32 i, sge_len;
2019 void *sge_addr;
2020
2021 in_data = wqe->inline_data;
2022 for (i = 0; i < wr->num_sge; i++) {
2023 sge_addr = (void *)(unsigned long)
2024 wr->sg_list[i].addr;
2025 sge_len = wr->sg_list[i].length;
2026
2027 if ((sge_len + wqe->inline_len) >
2028 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2029 dev_err(rdev_to_dev(rdev),
2030 "Inline data size requested > supported value");
2031 return -EINVAL;
2032 }
2034
2035 memcpy(in_data, sge_addr, sge_len);
2036 in_data += wr->sg_list[i].length;
2037 wqe->inline_len += wr->sg_list[i].length;
2038 }
2039 return wqe->inline_len;
2040}
2041
2042static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2043 struct ib_send_wr *wr,
2044 struct bnxt_qplib_swqe *wqe)
2045{
2046 int payload_sz = 0;
2047
2048 if (wr->send_flags & IB_SEND_INLINE)
2049 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2050 else
2051 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2052 wqe->num_sge);
2053
2054 return payload_sz;
2055}
2056
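/*
 * Hardware workaround: once a UD/GSI/raw-Ethernet QP has accumulated
 * BNXT_RE_UD_QP_HW_STALL WQEs, kick it with a modify-QP to RTS so the
 * hardware does not stall, then restart the WQE count.
 */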
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002057static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2058{
2059 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2060 qp->ib_qp.qp_type == IB_QPT_GSI ||
2061 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2062 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2063 int qp_attr_mask;
2064 struct ib_qp_attr qp_attr;
2065
2066 qp_attr_mask = IB_QP_STATE;
2067 qp_attr.qp_state = IB_QPS_RTS;
2068 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2069 qp->qplib_qp.wqe_cnt = 0;
2070 }
2071}
2072
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002073static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2074 struct bnxt_re_qp *qp,
2075 struct ib_send_wr *wr)
2076{
2077 struct bnxt_qplib_swqe wqe;
2078 int rc = 0, payload_sz = 0;
2079 unsigned long flags;
2080
2081 spin_lock_irqsave(&qp->sq_lock, flags);
2082 memset(&wqe, 0, sizeof(wqe));
2083 while (wr) {
2084 /* House keeping */
2085 memset(&wqe, 0, sizeof(wqe));
2086
2087 /* Common */
2088 wqe.num_sge = wr->num_sge;
2089 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2090 dev_err(rdev_to_dev(rdev),
2091 "Limit exceeded for Send SGEs");
2092 rc = -EINVAL;
2093 goto bad;
2094 }
2095
2096 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2097 if (payload_sz < 0) {
2098 rc = -EINVAL;
2099 goto bad;
2100 }
2101 wqe.wr_id = wr->wr_id;
2102
2103 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2104
2105 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2106 if (!rc)
2107 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2108bad:
2109 if (rc) {
2110 dev_err(rdev_to_dev(rdev),
2111 "Post send failed opcode = %#x rc = %d",
2112 wr->opcode, rc);
2113 break;
2114 }
2115 wr = wr->next;
2116 }
2117 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002118 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002119 spin_unlock_irqrestore(&qp->sq_lock, flags);
2120 return rc;
2121}
2122
2123int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2124 struct ib_send_wr **bad_wr)
2125{
2126 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2127 struct bnxt_qplib_swqe wqe;
2128 int rc = 0, payload_sz = 0;
2129 unsigned long flags;
2130
2131 spin_lock_irqsave(&qp->sq_lock, flags);
2132 while (wr) {
2133 /* House keeping */
2134 memset(&wqe, 0, sizeof(wqe));
2135
2136 /* Common */
2137 wqe.num_sge = wr->num_sge;
2138 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2139 dev_err(rdev_to_dev(qp->rdev),
2140 "Limit exceeded for Send SGEs");
2141 rc = -EINVAL;
2142 goto bad;
2143 }
2144
2145 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2146 if (payload_sz < 0) {
2147 rc = -EINVAL;
2148 goto bad;
2149 }
2150 wqe.wr_id = wr->wr_id;
2151
2152 switch (wr->opcode) {
2153 case IB_WR_SEND:
2154 case IB_WR_SEND_WITH_IMM:
2155 if (ib_qp->qp_type == IB_QPT_GSI) {
2156 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2157 payload_sz);
2158 if (rc)
2159 goto bad;
2160 wqe.rawqp1.lflags |=
2161 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2162 }
      2163 			if (wr->send_flags & IB_SEND_IP_CSUM)
      2164 				wqe.rawqp1.lflags |=
      2165 					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2171 /* Fall thru to build the wqe */
2172 case IB_WR_SEND_WITH_INV:
2173 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2174 break;
2175 case IB_WR_RDMA_WRITE:
2176 case IB_WR_RDMA_WRITE_WITH_IMM:
2177 case IB_WR_RDMA_READ:
2178 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2179 break;
2180 case IB_WR_ATOMIC_CMP_AND_SWP:
2181 case IB_WR_ATOMIC_FETCH_AND_ADD:
2182 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2183 break;
2184 case IB_WR_RDMA_READ_WITH_INV:
2185 dev_err(rdev_to_dev(qp->rdev),
2186 "RDMA Read with Invalidate is not supported");
2187 rc = -EINVAL;
2188 goto bad;
2189 case IB_WR_LOCAL_INV:
2190 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2191 break;
2192 case IB_WR_REG_MR:
2193 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2194 break;
2195 default:
2196 /* Unsupported WRs */
2197 dev_err(rdev_to_dev(qp->rdev),
2198 "WR (%#x) is not supported", wr->opcode);
2199 rc = -EINVAL;
2200 goto bad;
2201 }
2202 if (!rc)
2203 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2204bad:
2205 if (rc) {
2206 dev_err(rdev_to_dev(qp->rdev),
2207 "post_send failed op:%#x qps = %#x rc = %d\n",
2208 wr->opcode, qp->qplib_qp.state, rc);
2209 *bad_wr = wr;
2210 break;
2211 }
2212 wr = wr->next;
2213 }
2214 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002215 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002216 spin_unlock_irqrestore(&qp->sq_lock, flags);
2217
2218 return rc;
2219}
2220
2221static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2222 struct bnxt_re_qp *qp,
2223 struct ib_recv_wr *wr)
2224{
2225 struct bnxt_qplib_swqe wqe;
2226 int rc = 0, payload_sz = 0;
2227
2228 memset(&wqe, 0, sizeof(wqe));
2229 while (wr) {
2230 /* House keeping */
2231 memset(&wqe, 0, sizeof(wqe));
2232
2233 /* Common */
2234 wqe.num_sge = wr->num_sge;
2235 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2236 dev_err(rdev_to_dev(rdev),
2237 "Limit exceeded for Receive SGEs");
2238 rc = -EINVAL;
2239 break;
2240 }
2241 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2242 wr->num_sge);
2243 wqe.wr_id = wr->wr_id;
2244 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2245
2246 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2247 if (rc)
2248 break;
2249
2250 wr = wr->next;
2251 }
2252 if (!rc)
2253 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2254 return rc;
2255}
2256
2257int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2258 struct ib_recv_wr **bad_wr)
2259{
2260 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2261 struct bnxt_qplib_swqe wqe;
2262 int rc = 0, payload_sz = 0;
Devesh Sharma018cf592017-05-22 03:15:40 -07002263 unsigned long flags;
2264 u32 count = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002265
Devesh Sharma018cf592017-05-22 03:15:40 -07002266 spin_lock_irqsave(&qp->rq_lock, flags);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002267 while (wr) {
2268 /* House keeping */
2269 memset(&wqe, 0, sizeof(wqe));
2270
2271 /* Common */
2272 wqe.num_sge = wr->num_sge;
2273 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2274 dev_err(rdev_to_dev(qp->rdev),
2275 "Limit exceeded for Receive SGEs");
2276 rc = -EINVAL;
2277 *bad_wr = wr;
2278 break;
2279 }
2280
2281 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2282 wr->num_sge);
2283 wqe.wr_id = wr->wr_id;
2284 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2285
2286 if (ib_qp->qp_type == IB_QPT_GSI)
2287 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2288 payload_sz);
2289 if (!rc)
2290 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2291 if (rc) {
2292 *bad_wr = wr;
2293 break;
2294 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002295
2296 /* Ring DB if the RQEs posted reaches a threshold value */
2297 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2298 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2299 count = 0;
2300 }
2301
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002302 wr = wr->next;
2303 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002304
2305 if (count)
2306 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2307
2308 spin_unlock_irqrestore(&qp->rq_lock, flags);
2309
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002310 return rc;
2311}
2312
2313/* Completion Queues */
2314int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2315{
2316 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2317 struct bnxt_re_dev *rdev = cq->rdev;
2318 int rc;
2319
2320 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2321 if (rc) {
2322 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2323 return rc;
2324 }
Doug Ledford374cb862017-04-25 14:00:59 -04002325 if (!IS_ERR_OR_NULL(cq->umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002326 ib_umem_release(cq->umem);
2327
      2328 	kfree(cq->cql);
      2329 	kfree(cq);
2332 atomic_dec(&rdev->cq_count);
2333 rdev->nq.budget--;
2334 return 0;
2335}
2336
2337struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2338 const struct ib_cq_init_attr *attr,
2339 struct ib_ucontext *context,
2340 struct ib_udata *udata)
2341{
2342 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2343 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2344 struct bnxt_re_cq *cq = NULL;
2345 int rc, entries;
2346 int cqe = attr->cqe;
2347
2348 /* Validate CQ fields */
2349 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2350 dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
2351 return ERR_PTR(-EINVAL);
2352 }
2353 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2354 if (!cq)
2355 return ERR_PTR(-ENOMEM);
2356
2357 cq->rdev = rdev;
2358 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2359
2360 entries = roundup_pow_of_two(cqe + 1);
2361 if (entries > dev_attr->max_cq_wqes + 1)
2362 entries = dev_attr->max_cq_wqes + 1;
2363
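	/* A user CQ is backed by the application's buffer (pinned via
	 * ib_umem_get) and the context's DPI; a kernel CQ instead gets a
	 * CQL scratch array for polling and uses the privileged DPI.
	 */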
2364 if (context) {
2365 struct bnxt_re_cq_req req;
2366 struct bnxt_re_ucontext *uctx = container_of
2367 (context,
2368 struct bnxt_re_ucontext,
2369 ib_uctx);
2370 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2371 rc = -EFAULT;
2372 goto fail;
2373 }
2374
2375 cq->umem = ib_umem_get(context, req.cq_va,
2376 entries * sizeof(struct cq_base),
2377 IB_ACCESS_LOCAL_WRITE, 1);
2378 if (IS_ERR(cq->umem)) {
2379 rc = PTR_ERR(cq->umem);
2380 goto fail;
2381 }
2382 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2383 cq->qplib_cq.nmap = cq->umem->nmap;
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07002384 cq->qplib_cq.dpi = &uctx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002385 } else {
2386 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2387 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2388 GFP_KERNEL);
2389 if (!cq->cql) {
2390 rc = -ENOMEM;
2391 goto fail;
2392 }
2393
2394 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2395 cq->qplib_cq.sghead = NULL;
2396 cq->qplib_cq.nmap = 0;
2397 }
2398 cq->qplib_cq.max_wqe = entries;
2399 cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;
2400
2401 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2402 if (rc) {
2403 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2404 goto fail;
2405 }
2406
2407 cq->ib_cq.cqe = entries;
2408 cq->cq_period = cq->qplib_cq.period;
2409 rdev->nq.budget++;
2410
2411 atomic_inc(&rdev->cq_count);
2412
2413 if (context) {
2414 struct bnxt_re_cq_resp resp;
2415
2416 resp.cqid = cq->qplib_cq.id;
2417 resp.tail = cq->qplib_cq.hwq.cons;
2418 resp.phase = cq->qplib_cq.period;
2419 resp.rsvd = 0;
2420 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2421 if (rc) {
2422 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2423 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2424 goto c2fail;
2425 }
2426 }
2427
2428 return &cq->ib_cq;
2429
2430c2fail:
2431 if (context)
2432 ib_umem_release(cq->umem);
2433fail:
2434 kfree(cq->cql);
2435 kfree(cq);
2436 return ERR_PTR(rc);
2437}
2438
2439static u8 __req_to_ib_wc_status(u8 qstatus)
2440{
2441 switch (qstatus) {
2442 case CQ_REQ_STATUS_OK:
2443 return IB_WC_SUCCESS;
2444 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2445 return IB_WC_BAD_RESP_ERR;
2446 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2447 return IB_WC_LOC_LEN_ERR;
2448 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2449 return IB_WC_LOC_QP_OP_ERR;
2450 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2451 return IB_WC_LOC_PROT_ERR;
2452 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2453 return IB_WC_GENERAL_ERR;
2454 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2455 return IB_WC_REM_INV_REQ_ERR;
2456 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2457 return IB_WC_REM_ACCESS_ERR;
2458 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2459 return IB_WC_REM_OP_ERR;
2460 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2461 return IB_WC_RNR_RETRY_EXC_ERR;
2462 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2463 return IB_WC_RETRY_EXC_ERR;
2464 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2465 return IB_WC_WR_FLUSH_ERR;
2466 default:
2467 return IB_WC_GENERAL_ERR;
2468 }
2469 return 0;
2470}
2471
2472static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2473{
2474 switch (qstatus) {
2475 case CQ_RES_RAWETH_QP1_STATUS_OK:
2476 return IB_WC_SUCCESS;
2477 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2478 return IB_WC_LOC_ACCESS_ERR;
2479 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2480 return IB_WC_LOC_LEN_ERR;
2481 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2482 return IB_WC_LOC_PROT_ERR;
2483 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2484 return IB_WC_LOC_QP_OP_ERR;
2485 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2486 return IB_WC_GENERAL_ERR;
2487 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2488 return IB_WC_WR_FLUSH_ERR;
2489 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2490 return IB_WC_WR_FLUSH_ERR;
2491 default:
2492 return IB_WC_GENERAL_ERR;
2493 }
2494}
2495
2496static u8 __rc_to_ib_wc_status(u8 qstatus)
2497{
2498 switch (qstatus) {
2499 case CQ_RES_RC_STATUS_OK:
2500 return IB_WC_SUCCESS;
2501 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2502 return IB_WC_LOC_ACCESS_ERR;
2503 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2504 return IB_WC_LOC_LEN_ERR;
2505 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2506 return IB_WC_LOC_PROT_ERR;
2507 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2508 return IB_WC_LOC_QP_OP_ERR;
2509 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2510 return IB_WC_GENERAL_ERR;
2511 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2512 return IB_WC_REM_INV_REQ_ERR;
2513 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2514 return IB_WC_WR_FLUSH_ERR;
2515 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2516 return IB_WC_WR_FLUSH_ERR;
2517 default:
2518 return IB_WC_GENERAL_ERR;
2519 }
2520}
2521
2522static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2523{
2524 switch (cqe->type) {
2525 case BNXT_QPLIB_SWQE_TYPE_SEND:
2526 wc->opcode = IB_WC_SEND;
2527 break;
2528 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2529 wc->opcode = IB_WC_SEND;
2530 wc->wc_flags |= IB_WC_WITH_IMM;
2531 break;
2532 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2533 wc->opcode = IB_WC_SEND;
2534 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2535 break;
2536 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2537 wc->opcode = IB_WC_RDMA_WRITE;
2538 break;
2539 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2540 wc->opcode = IB_WC_RDMA_WRITE;
2541 wc->wc_flags |= IB_WC_WITH_IMM;
2542 break;
2543 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2544 wc->opcode = IB_WC_RDMA_READ;
2545 break;
2546 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2547 wc->opcode = IB_WC_COMP_SWAP;
2548 break;
2549 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2550 wc->opcode = IB_WC_FETCH_ADD;
2551 break;
2552 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2553 wc->opcode = IB_WC_LOCAL_INV;
2554 break;
2555 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2556 wc->opcode = IB_WC_REG_MR;
2557 break;
2558 default:
2559 wc->opcode = IB_WC_SEND;
2560 break;
2561 }
2562
2563 wc->status = __req_to_ib_wc_status(cqe->status);
2564}
2565
2566static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2567 u16 raweth_qp1_flags2)
2568{
2569 bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
2570
2571 /* raweth_qp1_flags Bit 9-6 indicates itype */
2572 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2573 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2574 return -1;
2575
2576 if (raweth_qp1_flags2 &
2577 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2578 raweth_qp1_flags2 &
2579 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2580 is_udp = true;
2581 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
      2582 		if (raweth_qp1_flags2 &
      2583 		    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE)
      2584 			is_ipv6 = true;
      2585 		else
      2586 			is_ipv4 = true;
2585 return ((is_ipv6) ?
2586 BNXT_RE_ROCEV2_IPV6_PACKET :
2587 BNXT_RE_ROCEV2_IPV4_PACKET);
2588 } else {
2589 return BNXT_RE_ROCE_V1_PACKET;
2590 }
2591}
2592
2593static int bnxt_re_to_ib_nw_type(int nw_type)
2594{
2595 u8 nw_hdr_type = 0xFF;
2596
2597 switch (nw_type) {
2598 case BNXT_RE_ROCE_V1_PACKET:
2599 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2600 break;
2601 case BNXT_RE_ROCEV2_IPV4_PACKET:
2602 nw_hdr_type = RDMA_NETWORK_IPV4;
2603 break;
2604 case BNXT_RE_ROCEV2_IPV6_PACKET:
2605 nw_hdr_type = RDMA_NETWORK_IPV6;
2606 break;
2607 }
2608 return nw_hdr_type;
2609}
2610
2611static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2612 void *rq_hdr_buf)
2613{
2614 u8 *tmp_buf = NULL;
2615 struct ethhdr *eth_hdr;
2616 u16 eth_type;
2617 bool rc = false;
2618
2619 tmp_buf = (u8 *)rq_hdr_buf;
2620 /*
      2621 	 * If the dest MAC is not the same as the I/F MAC, this could be
      2622 	 * a loopback or multicast address; check whether it is actually
      2623 	 * a loopback packet.
2624 */
2625 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
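		/* A looped-back frame starts with a 4-byte internal header
		 * before the Ethernet header (see the caller), so skip it
		 * before re-parsing the frame.
		 */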
2626 tmp_buf += 4;
2627 /* Check the ether type */
2628 eth_hdr = (struct ethhdr *)tmp_buf;
2629 eth_type = ntohs(eth_hdr->h_proto);
2630 switch (eth_type) {
2631 case ETH_P_IBOE:
2632 rc = true;
2633 break;
2634 case ETH_P_IP:
2635 case ETH_P_IPV6: {
2636 u32 len;
2637 struct udphdr *udp_hdr;
2638
2639 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2640 sizeof(struct ipv6hdr));
2641 tmp_buf += sizeof(struct ethhdr) + len;
2642 udp_hdr = (struct udphdr *)tmp_buf;
2643 if (ntohs(udp_hdr->dest) ==
2644 ROCE_V2_UDP_DPORT)
2645 rc = true;
2646 break;
2647 }
2648 default:
2649 break;
2650 }
2651 }
2652
2653 return rc;
2654}
2655
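/*
 * A raw-Ethernet completion on the real QP1 is replayed onto the shadow
 * GSI QP: the original CQE is stashed in the sqp_tbl entry, a receive
 * buffer is posted on the shadow QP, and the GRH/IP header plus the MAD
 * payload are sent to it so the consumer sees a normal GSI completion.
 */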
2656static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2657 struct bnxt_qplib_cqe *cqe)
2658{
2659 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2660 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2661 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2662 struct ib_send_wr *swr;
2663 struct ib_ud_wr udwr;
2664 struct ib_recv_wr rwr;
2665 int pkt_type = 0;
2666 u32 tbl_idx;
2667 void *rq_hdr_buf;
2668 dma_addr_t rq_hdr_buf_map;
2669 dma_addr_t shrq_hdr_buf_map;
2670 u32 offset = 0;
2671 u32 skip_bytes = 0;
2672 struct ib_sge s_sge[2];
2673 struct ib_sge r_sge[2];
2674 int rc;
2675
2676 memset(&udwr, 0, sizeof(udwr));
2677 memset(&rwr, 0, sizeof(rwr));
2678 memset(&s_sge, 0, sizeof(s_sge));
2679 memset(&r_sge, 0, sizeof(r_sge));
2680
2681 swr = &udwr.wr;
2682 tbl_idx = cqe->wr_id;
2683
2684 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2685 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2686 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2687 tbl_idx);
2688
2689 /* Shadow QP header buffer */
2690 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2691 tbl_idx);
2692 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2693
2694 /* Store this cqe */
2695 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2696 sqp_entry->qp1_qp = qp1_qp;
2697
2698 /* Find packet type from the cqe */
2699
2700 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2701 cqe->raweth_qp1_flags2);
2702 if (pkt_type < 0) {
2703 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2704 return -EINVAL;
2705 }
2706
2707 /* Adjust the offset for the user buffer and post in the rq */
2708
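	/*
	 * An IPv4 header is 20 bytes shorter than the 40-byte GRH/IPv6
	 * header, so the consumer's buffer is used at a 20-byte offset to
	 * keep the payload aligned with the GRH-sized area it expects.
	 */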
2709 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2710 offset = 20;
2711
2712 /*
2713 * QP1 loopback packet has 4 bytes of internal header before
2714 * ether header. Skip these four bytes.
2715 */
2716 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2717 skip_bytes = 4;
2718
      2719 	/* First send SGE. Skip the ether header */
2720 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2721 + skip_bytes;
2722 s_sge[0].lkey = 0xFFFFFFFF;
2723 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2724 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2725
2726 /* Second Send SGE */
2727 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2728 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2729 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2730 s_sge[1].addr += 8;
2731 s_sge[1].lkey = 0xFFFFFFFF;
2732 s_sge[1].length = 256;
2733
2734 /* First recv SGE */
2735
2736 r_sge[0].addr = shrq_hdr_buf_map;
2737 r_sge[0].lkey = 0xFFFFFFFF;
2738 r_sge[0].length = 40;
2739
2740 r_sge[1].addr = sqp_entry->sge.addr + offset;
2741 r_sge[1].lkey = sqp_entry->sge.lkey;
2742 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2743
2744 /* Create receive work request */
2745 rwr.num_sge = 2;
2746 rwr.sg_list = r_sge;
2747 rwr.wr_id = tbl_idx;
2748 rwr.next = NULL;
2749
2750 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2751 if (rc) {
2752 dev_err(rdev_to_dev(rdev),
2753 "Failed to post Rx buffers to shadow QP");
2754 return -ENOMEM;
2755 }
2756
2757 swr->num_sge = 2;
2758 swr->sg_list = s_sge;
2759 swr->wr_id = tbl_idx;
2760 swr->opcode = IB_WR_SEND;
2761 swr->next = NULL;
2762
2763 udwr.ah = &rdev->sqp_ah->ib_ah;
2764 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2765 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2766
2767 /* post data received in the send queue */
2768 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2769
2770 return 0;
2771}
2772
2773static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2774 struct bnxt_qplib_cqe *cqe)
2775{
2776 wc->opcode = IB_WC_RECV;
2777 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2778 wc->wc_flags |= IB_WC_GRH;
2779}
2780
2781static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2782 struct bnxt_qplib_cqe *cqe)
2783{
2784 wc->opcode = IB_WC_RECV;
2785 wc->status = __rc_to_ib_wc_status(cqe->status);
2786
2787 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2788 wc->wc_flags |= IB_WC_WITH_IMM;
2789 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2790 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2791 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2792 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2793 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2794}
2795
2796static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2797 struct ib_wc *wc,
2798 struct bnxt_qplib_cqe *cqe)
2799{
2800 u32 tbl_idx;
2801 struct bnxt_re_dev *rdev = qp->rdev;
2802 struct bnxt_re_qp *qp1_qp = NULL;
2803 struct bnxt_qplib_cqe *orig_cqe = NULL;
2804 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2805 int nw_type;
2806
2807 tbl_idx = cqe->wr_id;
2808
2809 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2810 qp1_qp = sqp_entry->qp1_qp;
2811 orig_cqe = &sqp_entry->cqe;
2812
2813 wc->wr_id = sqp_entry->wrid;
2814 wc->byte_len = orig_cqe->length;
2815 wc->qp = &qp1_qp->ib_qp;
2816
2817 wc->ex.imm_data = orig_cqe->immdata;
2818 wc->src_qp = orig_cqe->src_qp;
2819 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2820 wc->port_num = 1;
2821 wc->vendor_err = orig_cqe->status;
2822
2823 wc->opcode = IB_WC_RECV;
2824 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2825 wc->wc_flags |= IB_WC_GRH;
2826
2827 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2828 orig_cqe->raweth_qp1_flags2);
2829 if (nw_type >= 0) {
2830 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2831 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2832 }
2833}
2834
2835static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2836 struct bnxt_qplib_cqe *cqe)
2837{
2838 wc->opcode = IB_WC_RECV;
2839 wc->status = __rc_to_ib_wc_status(cqe->status);
2840
2841 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2842 wc->wc_flags |= IB_WC_WITH_IMM;
2843 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2844 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2845 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2846 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2847 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2848}
2849
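/*
 * When the CQ poll logic flags sq->send_phantom, post an extra "phantom"
 * WQE (a fence MW bind) on the SQ under the SQ lock; on failure the poll
 * path logs it and leaves the flag set so the WQE is retried later.
 */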
Eddie Wai9152e0b2017-06-14 03:26:23 -07002850static int send_phantom_wqe(struct bnxt_re_qp *qp)
2851{
2852 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2853 unsigned long flags;
2854 int rc = 0;
2855
2856 spin_lock_irqsave(&qp->sq_lock, flags);
2857
2858 rc = bnxt_re_bind_fence_mw(lib_qp);
2859 if (!rc) {
2860 lib_qp->sq.phantom_wqe_cnt++;
2861 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2862 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2863 lib_qp->id, lib_qp->sq.hwq.prod,
2864 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2865 lib_qp->sq.phantom_wqe_cnt);
2866 }
2867
2868 spin_unlock_irqrestore(&qp->sq_lock, flags);
2869 return rc;
2870}
2871
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002872int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2873{
2874 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2875 struct bnxt_re_qp *qp;
2876 struct bnxt_qplib_cqe *cqe;
2877 int i, ncqe, budget;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002878 struct bnxt_qplib_q *sq;
2879 struct bnxt_qplib_qp *lib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002880 u32 tbl_idx;
2881 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2882 unsigned long flags;
2883
2884 spin_lock_irqsave(&cq->cq_lock, flags);
2885 budget = min_t(u32, num_entries, cq->max_cql);
Devesh Sharma10d1ded2017-06-29 12:28:17 -07002886 num_entries = budget;
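	/* The poll is clamped to the CQL scratch array size; lowering
	 * num_entries as well keeps the (num_entries - budget) return
	 * value an accurate count of the CQEs actually reported.
	 */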
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002887 if (!cq->cql) {
2888 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2889 goto exit;
2890 }
2891 cqe = &cq->cql[0];
2892 while (budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002893 lib_qp = NULL;
2894 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
2895 if (lib_qp) {
2896 sq = &lib_qp->sq;
2897 if (sq->send_phantom) {
2898 qp = container_of(lib_qp,
2899 struct bnxt_re_qp, qplib_qp);
2900 if (send_phantom_wqe(qp) == -ENOMEM)
2901 dev_err(rdev_to_dev(cq->rdev),
2902 "Phantom failed! Scheduled to send again\n");
2903 else
2904 sq->send_phantom = false;
2905 }
2906 }
2907
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002908 if (!ncqe)
2909 break;
2910
2911 for (i = 0; i < ncqe; i++, cqe++) {
2912 /* Transcribe each qplib_wqe back to ib_wc */
2913 memset(wc, 0, sizeof(*wc));
2914
2915 wc->wr_id = cqe->wr_id;
2916 wc->byte_len = cqe->length;
2917 qp = container_of
2918 ((struct bnxt_qplib_qp *)
2919 (unsigned long)(cqe->qp_handle),
2920 struct bnxt_re_qp, qplib_qp);
2921 if (!qp) {
2922 dev_err(rdev_to_dev(cq->rdev),
2923 "POLL CQ : bad QP handle");
2924 continue;
2925 }
2926 wc->qp = &qp->ib_qp;
2927 wc->ex.imm_data = cqe->immdata;
2928 wc->src_qp = cqe->src_qp;
2929 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2930 wc->port_num = 1;
2931 wc->vendor_err = cqe->status;
2932
2933 switch (cqe->opcode) {
2934 case CQ_BASE_CQE_TYPE_REQ:
2935 if (qp->qplib_qp.id ==
2936 qp->rdev->qp1_sqp->qplib_qp.id) {
2937 /* Handle this completion with
2938 * the stored completion
2939 */
2940 memset(wc, 0, sizeof(*wc));
2941 continue;
2942 }
2943 bnxt_re_process_req_wc(wc, cqe);
2944 break;
2945 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2946 if (!cqe->status) {
2947 int rc = 0;
2948
2949 rc = bnxt_re_process_raw_qp_pkt_rx
2950 (qp, cqe);
2951 if (!rc) {
2952 memset(wc, 0, sizeof(*wc));
2953 continue;
2954 }
2955 cqe->status = -1;
2956 }
2957 /* Errors need not be looped back.
2958 * But change the wr_id to the one
2959 * stored in the table
2960 */
2961 tbl_idx = cqe->wr_id;
2962 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2963 wc->wr_id = sqp_entry->wrid;
2964 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2965 break;
2966 case CQ_BASE_CQE_TYPE_RES_RC:
2967 bnxt_re_process_res_rc_wc(wc, cqe);
2968 break;
2969 case CQ_BASE_CQE_TYPE_RES_UD:
2970 if (qp->qplib_qp.id ==
2971 qp->rdev->qp1_sqp->qplib_qp.id) {
2972 /* Handle this completion with
2973 * the stored completion
2974 */
2975 if (cqe->status) {
2976 continue;
2977 } else {
2978 bnxt_re_process_res_shadow_qp_wc
2979 (qp, wc, cqe);
2980 break;
2981 }
2982 }
2983 bnxt_re_process_res_ud_wc(wc, cqe);
2984 break;
2985 default:
2986 dev_err(rdev_to_dev(cq->rdev),
2987 "POLL CQ : type 0x%x not handled",
2988 cqe->opcode);
2989 continue;
2990 }
2991 wc++;
2992 budget--;
2993 }
2994 }
2995exit:
2996 spin_unlock_irqrestore(&cq->cq_lock, flags);
2997 return num_entries - budget;
2998}
2999
3000int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3001 enum ib_cq_notify_flags ib_cqn_flags)
3002{
3003 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3004 int type = 0;
3005
3006 /* Trigger on the very next completion */
3007 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3008 type = DBR_DBR_TYPE_CQ_ARMALL;
3009 /* Trigger on the next solicited completion */
3010 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3011 type = DBR_DBR_TYPE_CQ_ARMSE;
3012
Selvin Xavier499e4562017-06-29 12:28:18 -07003013 /* Poll to see if there are missed events */
3014 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3015 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
3016 return 1;
3017
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003018 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3019
3020 return 0;
3021}
3022
3023/* Memory Regions */
3024struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3025{
3026 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3027 struct bnxt_re_dev *rdev = pd->rdev;
3028 struct bnxt_re_mr *mr;
3029 u64 pbl = 0;
3030 int rc;
3031
3032 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3033 if (!mr)
3034 return ERR_PTR(-ENOMEM);
3035
3036 mr->rdev = rdev;
3037 mr->qplib_mr.pd = &pd->qplib_pd;
3038 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3039 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3040
3041 /* Allocate and register 0 as the address */
3042 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3043 if (rc)
3044 goto fail;
3045
3046 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
      3047 	mr->qplib_mr.total_size = -1; /* Infinite length */
3048 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
3049 if (rc)
3050 goto fail_mr;
3051
3052 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3053 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3054 IB_ACCESS_REMOTE_ATOMIC))
3055 mr->ib_mr.rkey = mr->ib_mr.lkey;
3056 atomic_inc(&rdev->mr_count);
3057
3058 return &mr->ib_mr;
3059
3060fail_mr:
3061 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3062fail:
3063 kfree(mr);
3064 return ERR_PTR(rc);
3065}
3066
3067int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3068{
3069 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3070 struct bnxt_re_dev *rdev = mr->rdev;
Colin Ian Kingebbd1df2017-02-17 15:35:22 +00003071 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003072
Selvin Xavier1c980b02017-05-22 03:15:34 -07003073 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3074 if (rc) {
3075 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3076 return rc;
3077 }
3078
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003079 if (mr->npages && mr->pages) {
3080 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3081 &mr->qplib_frpl);
3082 kfree(mr->pages);
3083 mr->npages = 0;
3084 mr->pages = NULL;
3085 }
Doug Ledford374cb862017-04-25 14:00:59 -04003086 if (!IS_ERR_OR_NULL(mr->ib_umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003087 ib_umem_release(mr->ib_umem);
3088
3089 kfree(mr);
3090 atomic_dec(&rdev->mr_count);
3091 return rc;
3092}
3093
3094static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3095{
3096 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3097
3098 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3099 return -ENOMEM;
3100
3101 mr->pages[mr->npages++] = addr;
3102 return 0;
3103}
3104
3105int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3106 unsigned int *sg_offset)
3107{
3108 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3109
3110 mr->npages = 0;
3111 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3112}
3113
3114struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3115 u32 max_num_sg)
3116{
3117 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3118 struct bnxt_re_dev *rdev = pd->rdev;
3119 struct bnxt_re_mr *mr = NULL;
3120 int rc;
3121
3122 if (type != IB_MR_TYPE_MEM_REG) {
3123 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3124 return ERR_PTR(-EINVAL);
3125 }
3126 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3127 return ERR_PTR(-EINVAL);
3128
3129 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3130 if (!mr)
3131 return ERR_PTR(-ENOMEM);
3132
3133 mr->rdev = rdev;
3134 mr->qplib_mr.pd = &pd->qplib_pd;
3135 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3136 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3137
3138 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3139 if (rc)
3140 goto fail;
3141
3142 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3143 mr->ib_mr.rkey = mr->ib_mr.lkey;
3144
3145 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3146 if (!mr->pages) {
3147 rc = -ENOMEM;
3148 goto fail;
3149 }
3150 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3151 &mr->qplib_frpl, max_num_sg);
3152 if (rc) {
3153 dev_err(rdev_to_dev(rdev),
3154 "Failed to allocate HW FR page list");
3155 goto fail_mr;
3156 }
3157
3158 atomic_inc(&rdev->mr_count);
3159 return &mr->ib_mr;
3160
3161fail_mr:
3162 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3163fail:
3164 kfree(mr->pages);
3165 kfree(mr);
3166 return ERR_PTR(rc);
3167}
3168
Eddie Wai9152e0b2017-06-14 03:26:23 -07003169struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3170 struct ib_udata *udata)
3171{
3172 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3173 struct bnxt_re_dev *rdev = pd->rdev;
3174 struct bnxt_re_mw *mw;
3175 int rc;
3176
3177 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3178 if (!mw)
3179 return ERR_PTR(-ENOMEM);
3180 mw->rdev = rdev;
3181 mw->qplib_mw.pd = &pd->qplib_pd;
3182
3183 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3184 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3185 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3186 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3187 if (rc) {
3188 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3189 goto fail;
3190 }
3191 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3192
3193 atomic_inc(&rdev->mw_count);
3194 return &mw->ib_mw;
3195
3196fail:
3197 kfree(mw);
3198 return ERR_PTR(rc);
3199}
3200
3201int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3202{
3203 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3204 struct bnxt_re_dev *rdev = mw->rdev;
3205 int rc;
3206
3207 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3208 if (rc) {
3209 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3210 return rc;
3211 }
3212
3213 kfree(mw);
3214 atomic_dec(&rdev->mw_count);
3215 return rc;
3216}
3217
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003218/* uverbs */
3219struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3220 u64 virt_addr, int mr_access_flags,
3221 struct ib_udata *udata)
3222{
3223 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3224 struct bnxt_re_dev *rdev = pd->rdev;
3225 struct bnxt_re_mr *mr;
3226 struct ib_umem *umem;
3227 u64 *pbl_tbl, *pbl_tbl_orig;
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003228 int i, umem_pgs, pages, rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003229 struct scatterlist *sg;
3230 int entry;
3231
Selvin Xavier58d4a672017-06-29 12:28:12 -07003232 if (length > BNXT_RE_MAX_MR_SIZE) {
3233 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
3234 length, BNXT_RE_MAX_MR_SIZE);
3235 return ERR_PTR(-ENOMEM);
3236 }
3237
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003238 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3239 if (!mr)
3240 return ERR_PTR(-ENOMEM);
3241
3242 mr->rdev = rdev;
3243 mr->qplib_mr.pd = &pd->qplib_pd;
3244 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3245 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3246
3247 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3248 mr_access_flags, 0);
3249 if (IS_ERR(umem)) {
3250 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3251 rc = -EFAULT;
3252 goto free_mr;
3253 }
3254 mr->ib_umem = umem;
3255
3256 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3257 if (rc) {
3258 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3259 goto release_umem;
3260 }
3261 /* The fixed portion of the rkey is the same as the lkey */
3262 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3263
3264 mr->qplib_mr.va = virt_addr;
3265 umem_pgs = ib_umem_page_count(umem);
3266 if (!umem_pgs) {
3267 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3268 rc = -EINVAL;
3269 goto free_mrw;
3270 }
3271 mr->qplib_mr.total_size = length;
3272
      3273 	pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3274 if (!pbl_tbl) {
3275 rc = -EINVAL;
3276 goto free_mrw;
3277 }
3278 pbl_tbl_orig = pbl_tbl;
3279
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003280 if (umem->hugetlb) {
3281 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3282 rc = -EFAULT;
3283 goto fail;
3284 }
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003285
3286 if (umem->page_shift != PAGE_SHIFT) {
3287 dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003288 rc = -EFAULT;
3289 goto fail;
3290 }
3291 /* Map umem buf ptrs to the PBL */
3292 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003293 pages = sg_dma_len(sg) >> umem->page_shift;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003294 for (i = 0; i < pages; i++, pbl_tbl++)
Artemy Kovalyov3e7e1192017-04-05 09:23:50 +03003295 *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003296 }
3297 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3298 umem_pgs, false);
3299 if (rc) {
3300 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3301 goto fail;
3302 }
3303
3304 kfree(pbl_tbl_orig);
3305
3306 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3307 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3308 atomic_inc(&rdev->mr_count);
3309
3310 return &mr->ib_mr;
3311fail:
3312 kfree(pbl_tbl_orig);
3313free_mrw:
3314 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3315release_umem:
3316 ib_umem_release(umem);
3317free_mr:
3318 kfree(mr);
3319 return ERR_PTR(rc);
3320}
3321
3322struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3323 struct ib_udata *udata)
3324{
3325 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3326 struct bnxt_re_uctx_resp resp;
3327 struct bnxt_re_ucontext *uctx;
3328 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3329 int rc;
3330
3331 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3332 ibdev->uverbs_abi_ver);
3333
3334 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
      3335 		dev_dbg(rdev_to_dev(rdev), "ABI version is different from the supported version %d",
      3336 			BNXT_RE_ABI_VERSION);
3337 return ERR_PTR(-EPERM);
3338 }
3339
3340 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3341 if (!uctx)
3342 return ERR_PTR(-ENOMEM);
3343
3344 uctx->rdev = rdev;
3345
3346 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3347 if (!uctx->shpg) {
3348 rc = -ENOMEM;
3349 goto fail;
3350 }
3351 spin_lock_init(&uctx->sh_lock);
3352
      3353 	resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
3354 resp.max_qp = rdev->qplib_ctx.qpc_count;
3355 resp.pg_size = PAGE_SIZE;
3356 resp.cqe_sz = sizeof(struct cq_base);
3357 resp.max_cqd = dev_attr->max_cq_wqes;
3358 resp.rsvd = 0;
3359
3360 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3361 if (rc) {
3362 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3363 rc = -EFAULT;
3364 goto cfail;
3365 }
3366
3367 return &uctx->ib_uctx;
3368cfail:
3369 free_page((unsigned long)uctx->shpg);
3370 uctx->shpg = NULL;
3371fail:
3372 kfree(uctx);
3373 return ERR_PTR(rc);
3374}
3375
3376int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3377{
3378 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3379 struct bnxt_re_ucontext,
3380 ib_uctx);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003381
3382 struct bnxt_re_dev *rdev = uctx->rdev;
3383 int rc = 0;
3384
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003385 if (uctx->shpg)
3386 free_page((unsigned long)uctx->shpg);
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07003387
3388 if (uctx->dpi.dbr) {
3389 /* Free DPI only if this is the first PD allocated by the
3390 * application and mark the context dpi as NULL
3391 */
3392 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3393 &rdev->qplib_res.dpi_tbl,
3394 &uctx->dpi);
3395 if (rc)
      3396 			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3397 /* Don't fail, continue*/
3398 uctx->dpi.dbr = NULL;
3399 }
3400
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003401 kfree(uctx);
3402 return 0;
3403}
3404
3405/* Helper function to mmap the virtual memory from user app */
3406int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3407{
3408 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3409 struct bnxt_re_ucontext,
3410 ib_uctx);
3411 struct bnxt_re_dev *rdev = uctx->rdev;
3412 u64 pfn;
3413
3414 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3415 return -EINVAL;
3416
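	/* A non-zero page offset identifies a doorbell (DPI) page, which is
	 * mapped uncached; offset zero maps the per-context shared page.
	 */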
3417 if (vma->vm_pgoff) {
3418 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3419 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3420 PAGE_SIZE, vma->vm_page_prot)) {
3421 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3422 return -EAGAIN;
3423 }
3424 } else {
3425 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3426 if (remap_pfn_range(vma, vma->vm_start,
3427 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3428 dev_err(rdev_to_dev(rdev),
3429 "Failed to map shared page");
3430 return -EAGAIN;
3431 }
3432 }
3433
3434 return 0;
3435}