/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>

static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
};

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
};
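
/*
 * Editorial sketch (not driver code): the two helpers above are inverses
 * over the flag bits they both cover, so a translation round trip
 * preserves the mask. A minimal usage example, assuming only flags
 * handled above are set:
 *
 *	int qflags = __from_ib_access_flags(IB_ACCESS_LOCAL_WRITE |
 *					    IB_ACCESS_REMOTE_READ);
 *	enum ib_access_flags iflags = __to_ib_access_flags(qflags);
 *	iflags is now (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ)
 */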

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}

/* Device */
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct net_device *netdev = NULL;

	rcu_read_lock();
	if (rdev)
		netdev = rdev->netdev;
	if (netdev)
		dev_hold(netdev);

	rcu_read_unlock();
	return netdev;
}

int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver), sizeof(ib_attr->fw_ver)));
	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
			    (u8 *)&ib_attr->sys_image_guid);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_LOCAL_DMA_LKEY
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->max_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	if (dev_attr->is_atomic) {
		ib_attr->atomic_cap = IB_ATOMIC_HCA;
		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
	}

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_fmr = 0;
	ib_attr->max_map_per_fmr = 0;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}

int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify)
{
	switch (device_modify_mask) {
	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
		/* Modifying the GUID requires modification of the GID table */
		/* The GUID should be made READ-ONLY */
		break;
	case IB_DEVICE_MODIFY_NODE_DESC:
		/* The Node Desc should be made READ-ONLY */
		break;
	default:
		break;
	}
	return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = 5;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = 3;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP |
				    IB_PORT_IP_BASED_GIDS;

	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	port_attr->active_speed = rdev->active_speed;
	port_attr->active_width = rdev->active_width;

	return 0;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	/* Ignore port_num */

	memset(pkey, 0, sizeof(*pkey));
	return bnxt_qplib_get_pkey(&rdev->qplib_res,
				   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc = 0;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}

int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx];
		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->qp1_sqp) {
			dev_dbg(rdev_to_dev(rdev),
				"Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
			if (rc) {
				dev_err(rdev_to_dev(rdev),
					"Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}

int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
		    unsigned int index, const union ib_gid *gid,
		    const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	if ((attr->ndev) && is_vlan_dev(attr->ndev))
		vlan_id = vlan_dev_vlan_id(attr->ndev);

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;

	return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

#define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}
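
/*
 * Editorial sketch: ib_inc_rkey() (from rdma/ib_verbs.h) generates the
 * next rkey by bumping only the low eight "variant" bits and keeping the
 * index bits intact, roughly:
 *
 *	new_rkey = ((old_rkey + 1) & 0x000000ff) | (old_rkey & ~0x000000ff);
 *
 * Rotating the rkey on every (re)bind means a previously advertised
 * fence rkey no longer matches the bound window.
 */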

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	dev_dbg(rdev_to_dev(qp->rdev),
		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	u64 pbl_tbl;
	int rc;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register MR */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	pbl_tbl = dma_addr;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		dev_err(rdev_to_dev(rdev),
			"Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}
568
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800569/* Protection Domains */
570int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
571{
572 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
573 struct bnxt_re_dev *rdev = pd->rdev;
574 int rc;
575
Eddie Wai9152e0b2017-06-14 03:26:23 -0700576 bnxt_re_destroy_fence_mr(pd);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800577
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700578 if (pd->qplib_pd.id) {
579 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
580 &rdev->qplib_res.pd_tbl,
581 &pd->qplib_pd);
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800582 if (rc)
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700583 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800584 }
585
586 kfree(pd);
587 return 0;
588}
589
590struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
591 struct ib_ucontext *ucontext,
592 struct ib_udata *udata)
593{
594 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
595 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
596 struct bnxt_re_ucontext,
597 ib_uctx);
598 struct bnxt_re_pd *pd;
599 int rc;
600
601 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
602 if (!pd)
603 return ERR_PTR(-ENOMEM);
604
605 pd->rdev = rdev;
606 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
607 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
608 rc = -ENOMEM;
609 goto fail;
610 }
611
612 if (udata) {
613 struct bnxt_re_pd_resp resp;
614
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700615 if (!ucntx->dpi.dbr) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800616 /* Allocate DPI in alloc_pd to avoid failing of
617 * ibv_devinfo and family of application when DPIs
618 * are depleted.
619 */
620 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700621 &ucntx->dpi, ucntx)) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800622 rc = -ENOMEM;
623 goto dbfail;
624 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800625 }
626
627 resp.pdid = pd->qplib_pd.id;
628 /* Still allow mapping this DBR to the new user PD. */
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -0700629 resp.dpi = ucntx->dpi.dpi;
630 resp.dbr = (u64)ucntx->dpi.umdbr;
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800631
632 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
633 if (rc) {
634 dev_err(rdev_to_dev(rdev),
635 "Failed to copy user response\n");
636 goto dbfail;
637 }
638 }
639
Eddie Wai9152e0b2017-06-14 03:26:23 -0700640 if (!udata)
641 if (bnxt_re_create_fence_mr(pd))
642 dev_warn(rdev_to_dev(rdev),
643 "Failed to create Fence-MR\n");
Selvin Xavier1ac5a402017-02-10 03:19:33 -0800644 return &pd->ib_pd;
645dbfail:
646 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
647 &pd->qplib_pd);
648fail:
649 kfree(pd);
650 return ERR_PTR(rc);
651}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;
	int rc;

	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
		return rc;
	}
	kfree(ah);
	return 0;
}

struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
				struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	int rc;
	u8 nw_type;

	struct ib_gid_attr sgid_attr;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
		return ERR_PTR(-EINVAL);
	}
	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	/*
	 * If RoCE V2 is enabled, the stack will have two entries for
	 * each GID entry. Avoid this duplicate entry in HW by dividing
	 * the GID index by 2 for RoCE V2.
	 */
	ah->qplib_ah.sgid_index = grh->sgid_index / 2;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
	if (ib_pd->uobject &&
	    !rdma_is_multicast_addr((struct in6_addr *)
				    grh->dgid.raw) &&
	    !rdma_link_local_addr((struct in6_addr *)
				  grh->dgid.raw)) {
		union ib_gid sgid;

		rc = ib_get_cached_gid(&rdev->ibdev, 1,
				       grh->sgid_index, &sgid,
				       &sgid_attr);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to query gid at index %d",
				grh->sgid_index);
			goto fail;
		}
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
		/* Get network header type for this GID */
		nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV4:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
			break;
		case RDMA_NETWORK_IPV6:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
			break;
		default:
			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
			break;
		}
	}

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
		goto fail;
	}

	/* Write AVID to shared page. */
	if (ib_pd->uobject) {
		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
		struct bnxt_re_ucontext *uctx;
		unsigned long flag;
		u32 *wrptr;

		uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb();	/* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}

	return &ah->ib_ah;

fail:
	kfree(ah);
	return ERR_PTR(rc);
}
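
/*
 * Editorial note with a worked example of the sgid_index / 2 mapping used
 * above (the table layout is an assumption for illustration): with RoCE
 * v1 and v2 both enabled, the stack keeps a v1/v2 pair per GID, e.g.
 * stack indices 4 and 5 for the same GID, and both resolve to HW SGID
 * index 4 / 2 == 5 / 2 == 2.
 */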

int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	int rc;

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
	bnxt_qplib_del_flush_qp(&qp->qplib_qp);
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
					   &rdev->sqp_ah->qplib_ah);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to destroy HW AH for shadow QP");
			return rc;
		}

		bnxt_qplib_del_flush_qp(&qp->qplib_qp);
		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
					   &rdev->qp1_sqp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to destroy Shadow QP");
			return rc;
		}
		mutex_lock(&rdev->qp_lock);
		list_del(&rdev->qp1_sqp->list);
		atomic_dec(&rdev->qp_count);
		mutex_unlock(&rdev->qp_lock);

		kfree(rdev->sqp_ah);
		kfree(rdev->qp1_sqp);
		rdev->qp1_sqp = NULL;
		rdev->sqp_ah = NULL;
	}

	if (!IS_ERR_OR_NULL(qp->rumem))
		ib_umem_release(qp->rumem);
	if (!IS_ERR_OR_NULL(qp->sumem))
		ib_umem_release(qp->sumem);

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	atomic_dec(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	kfree(qp);
	return 0;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}

static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{
	struct bnxt_re_qp_req ureq;
	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
	struct ib_umem *umem;
	int bytes = 0;
	struct ib_ucontext *context = pd->ib_pd.uobject->context;
	struct bnxt_re_ucontext *cntx = container_of(context,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
		bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(context, ureq.qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sglist = umem->sg_head.sgl;
	qplib_qp->sq.nmap = umem->nmap;
	qplib_qp->qp_handle = ureq.qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(context, ureq.qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sglist = umem->sg_head.sgl;
		qplib_qp->rq.nmap = umem->nmap;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	qplib_qp->sq.sglist = NULL;
	qplib_qp->sq.nmap = 0;

	return PTR_ERR(umem);
}

static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* Supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to allocate HW AH for Shadow QP");
		goto fail;
	}

	return ah;

fail:
	kfree(ah);
	return NULL;
}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	rdev->sqp_id = qp->qplib_qp.id;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}

struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_qp *qp;
	struct bnxt_re_cq *cq;
	int rc, entries;

	if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
		return ERR_PTR(-EINVAL);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->rdev = rdev;
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
	if (qp->qplib_qp.type == IB_QPT_MAX) {
		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
			qp->qplib_qp.type);
		rc = -EINVAL;
		goto fail;
	}
	qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
	qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
				  IB_SIGNAL_ALL_WR) ? true : false);

	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
	if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
		qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

	if (qp_init_attr->send_cq) {
		cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Send CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.scq = &cq->qplib_cq;
	}

	if (qp_init_attr->recv_cq) {
		cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Receive CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.rcq = &cq->qplib_cq;
	}

	if (qp_init_attr->srq) {
		dev_err(rdev_to_dev(rdev), "SRQ not supported");
		rc = -ENOTSUPP;
		goto fail;
	} else {
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty.
		 */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);

		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						qp_init_attr->cap.max_recv_wr;

		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
	}

	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

	if (qp_init_attr->qp_type == IB_QPT_GSI) {
		/* Allocate 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_init_attr->cap.max_send_wr;
		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		qp->qplib_qp.sq.max_sge++;
		if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

		qp->qplib_qp.rq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;

		qp->qplib_qp.sq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
		qp->qplib_qp.dpi = &rdev->dpi_privileged;
		rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
			goto fail;
		}
		/* Create a shadow QP to handle the QP1 traffic */
		rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
							 &qp->qplib_qp);
		if (!rdev->qp1_sqp) {
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create Shadow QP for QP1");
			goto qp_destroy;
		}
		rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
							   &qp->qplib_qp);
		if (!rdev->sqp_ah) {
			bnxt_qplib_destroy_qp(&rdev->qplib_res,
					      &rdev->qp1_sqp->qplib_qp);
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create AH entry for ShadowQP");
			goto qp_destroy;
		}

	} else {
		/* Allocate 128 + 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
					     BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes +
						BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;

		/*
		 * Reserve one slot for the phantom WQE. The application may
		 * then post one extra entry, but allowing that avoids an
		 * unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;

		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
		qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
		if (udata) {
			rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
			if (rc)
				goto fail;
		} else {
			qp->qplib_qp.dpi = &rdev->dpi_privileged;
		}

		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
			goto fail;
		}
	}

	qp->ib_qp.qp_num = qp->qplib_qp.id;
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);

	if (udata) {
		struct bnxt_re_qp_resp resp;

		resp.qpid = qp->ib_qp.qp_num;
		resp.rsvd = 0;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
			goto qp_destroy;
		}
	}
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);

	return &qp->ib_qp;
qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
fail:
	kfree(qp);
	return ERR_PTR(rc);
}
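
/*
 * Editorial sketch of the SQ sizing arithmetic above, with illustrative
 * numbers (BNXT_QPLIB_RESERVED_QP_WRS is assumed to be 128 here): for a
 * non-GSI QP with cap.max_send_wr == 100,
 *
 *	entries      = roundup_pow_of_two(100 + 128 + 1) == 256
 *	q_full_delta = 128 + 1 - 1 == 128
 *
 * so one reserved slot is re-purposed for the phantom WQE while the
 * remaining delta keeps the ring from reporting a spurious queue-full.
 */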

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_2048;
	}
}

static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
				    struct bnxt_re_qp *qp1_qp,
				    int qp_attr_mask)
{
	struct bnxt_re_qp *qp = rdev->qp1_sqp;
	int rc = 0;

	if (qp_attr_mask & IB_QP_STATE) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		/* Using a Random QKEY */
		qp->qplib_qp.qkey = 0x81818181;
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
	}

	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to modify Shadow QP for QP1");
	return rc;
}

int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	enum ib_qp_state curr_qp_state, new_qp_state;
	int rc, entries;
	int status;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;
	u8 nw_type;

	qp->qplib_qp.modify_flags = 0;
	if (qp_attr_mask & IB_QP_STATE) {
		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
		new_qp_state = qp_attr->qp_state;
		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
					ib_qp->qp_type, qp_attr_mask,
					IB_LINK_LAYER_ETHERNET)) {
			dev_err(rdev_to_dev(rdev),
				"Invalid attribute mask: %#x specified ",
				qp_attr_mask);
			dev_err(rdev_to_dev(rdev),
				"for qpn: %#x type: %#x",
				ib_qp->qp_num, ib_qp->qp_type);
			dev_err(rdev_to_dev(rdev),
				"curr_qp_state=0x%x, new_qp_state=0x%x\n",
				curr_qp_state, new_qp_state);
			return -EINVAL;
		}
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);

		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p to flush list\n",
				qp);
			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		}
		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p out of flush list\n",
				qp);
			bnxt_qplib_del_flush_qp(&qp->qplib_qp);
		}
	}
	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
		qp->qplib_qp.en_sqd_async_notify = true;
	}
	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
		qp->qplib_qp.access =
			__from_ib_access_flags(qp_attr->qp_access_flags);
		/* LOCAL_WRITE access must be set to allow RC receive */
		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
	}
	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		qp->qplib_qp.qkey = qp_attr->qkey;
	}
	if (qp_attr_mask & IB_QP_AV) {
		const struct ib_global_route *grh =
			rdma_ah_read_grh(&qp_attr->ah_attr);

		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
		       sizeof(qp->qplib_qp.ah.dgid.data));
		qp->qplib_qp.ah.flow_label = grh->flow_label;
		/* If RoCE V2 is enabled, the stack will have two entries for
		 * each GID entry. Avoid this duplicate entry in HW by
		 * dividing the GID index by 2 for RoCE V2.
		 */
		qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
		ether_addr_copy(qp->qplib_qp.ah.dmac,
				qp_attr->ah_attr.roce.dmac);

		status = ib_get_cached_gid(&rdev->ibdev, 1,
					   grh->sgid_index,
					   &sgid, &sgid_attr);
		if (!status && sgid_attr.ndev) {
			memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
			       ETH_ALEN);
			dev_put(sgid_attr.ndev);
			nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
							 &sgid);
			switch (nw_type) {
			case RDMA_NETWORK_IPV4:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
				break;
			case RDMA_NETWORK_IPV6:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
				break;
			default:
				qp->qplib_qp.nw_type =
					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
				break;
			}
		}
	}

	if (qp_attr_mask & IB_QP_PATH_MTU) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
	} else if (qp_attr->qp_state == IB_QPS_RTR) {
		qp->qplib_qp.modify_flags |=
			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu =
			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
		qp->qplib_qp.mtu =
			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	}

	if (qp_attr_mask & IB_QP_TIMEOUT) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
		qp->qplib_qp.timeout = qp_attr->timeout;
	}
	if (qp_attr_mask & IB_QP_RETRY_CNT) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
	}
	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
	}
	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
	}
	if (qp_attr_mask & IB_QP_RQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
		/* Cap the max_rd_atomic to device max */
		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
						   dev_attr->max_qp_rd_atom);
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (qp_attr->max_dest_rd_atomic >
		    dev_attr->max_qp_init_rd_atom) {
			dev_err(rdev_to_dev(rdev),
				"max_dest_rd_atomic requested %d is > dev_max %d",
				qp_attr->max_dest_rd_atomic,
				dev_attr->max_qp_init_rd_atom);
			return -EINVAL;
		}

		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
	}
	if (qp_attr_mask & IB_QP_CAP) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_inline_data >=
						dev_attr->max_inline_data)) {
			dev_err(rdev_to_dev(rdev),
				"Create QP failed - max exceeded");
			return -EINVAL;
		}
		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_attr->cap.max_send_wr;
		/*
		 * Reserve one slot for the phantom WQE. The application may
		 * then post one extra entry, but allowing that avoids an
		 * unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;
		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
		if (qp->qplib_qp.rq.max_wqe) {
			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
			qp->qplib_qp.rq.max_wqe =
				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						       qp_attr->cap.max_recv_wr;
			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
		} else {
			/* SRQ was used prior, just ignore the RQ caps */
		}
	}
	if (qp_attr_mask & IB_QP_DEST_QPN) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
	}
	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
	return rc;
}

int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_qp *qplib_qp;
	int rc;

	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
	if (!qplib_qp)
		return -ENOMEM;

	qplib_qp->id = qp->qplib_qp.id;
	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;

	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
		goto out;
	}
	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
	qp_attr->pkey_index = qplib_qp->pkey_index;
	qp_attr->qkey = qplib_qp->qkey;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
			qplib_qp->ah.host_sgid_index,
			qplib_qp->ah.hop_limit,
			qplib_qp->ah.traffic_class);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
	qp_attr->timeout = qplib_qp->timeout;
	qp_attr->retry_cnt = qplib_qp->retry_cnt;
	qp_attr->rnr_retry = qplib_qp->rnr_retry;
	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
	qp_attr->rq_psn = qplib_qp->rq.psn;
	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
	qp_attr->sq_psn = qplib_qp->sq.psn;
	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
							 IB_SIGNAL_REQ_WR;
	qp_attr->dest_qp_num = qplib_qp->dest_qpn;

	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;

out:
	kfree(qplib_qp);
	return rc;
}
1620
1621/* Routine for sending QP1 packets for RoCE V1 and V2
1622 */
1623static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1624 struct ib_send_wr *wr,
1625 struct bnxt_qplib_swqe *wqe,
1626 int payload_size)
1627{
1628 struct ib_device *ibdev = &qp->rdev->ibdev;
1629 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1630 ib_ah);
1631 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1632 struct bnxt_qplib_sge sge;
1633 union ib_gid sgid;
1634 u8 nw_type;
1635 u16 ether_type;
1636 struct ib_gid_attr sgid_attr;
1637 union ib_gid dgid;
1638 bool is_eth = false;
1639 bool is_vlan = false;
1640 bool is_grh = false;
1641 bool is_udp = false;
1642 u8 ip_version = 0;
1643 u16 vlan_id = 0xFFFF;
1644 void *buf;
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07001645 int i, rc = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001646
1647 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1648
1649 rc = ib_get_cached_gid(ibdev, 1,
1650 qplib_ah->host_sgid_index, &sgid,
1651 &sgid_attr);
1652 if (rc) {
1653 dev_err(rdev_to_dev(qp->rdev),
1654 "Failed to query gid at index %d",
1655 qplib_ah->host_sgid_index);
1656 return rc;
1657 }
1658 if (sgid_attr.ndev) {
1659 if (is_vlan_dev(sgid_attr.ndev))
1660 vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1661 dev_put(sgid_attr.ndev);
1662 }
1663 /* Get network header type for this GID */
1664 nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1665 switch (nw_type) {
1666 case RDMA_NETWORK_IPV4:
1667 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1668 break;
1669 case RDMA_NETWORK_IPV6:
1670 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1671 break;
1672 default:
1673 nw_type = BNXT_RE_ROCE_V1_PACKET;
1674 break;
1675 }
1676 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1677 is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1678 if (is_udp) {
1679 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1680 ip_version = 4;
1681 ether_type = ETH_P_IP;
1682 } else {
1683 ip_version = 6;
1684 ether_type = ETH_P_IPV6;
1685 }
1686 is_grh = false;
1687 } else {
1688 ether_type = ETH_P_IBOE;
1689 is_grh = true;
1690 }
1691
1692 is_eth = true;
1693	is_vlan = vlan_id && (vlan_id < 0x1000);
1694
1695 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1696 ip_version, is_udp, 0, &qp->qp1_hdr);
1697
1698 /* ETH */
1699 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1700 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1701
1702 /* For vlan, check the sgid for vlan existence */
1703
1704 if (!is_vlan) {
1705 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1706 } else {
1707 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1708 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1709 }
1710
1711 if (is_grh || (ip_version == 6)) {
1712 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1713 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1714 sizeof(sgid));
1715 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1716 }
1717
1718 if (ip_version == 4) {
1719 qp->qp1_hdr.ip4.tos = 0;
1720 qp->qp1_hdr.ip4.id = 0;
1721 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1722 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1723
1724 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1725 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1726 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1727 }
1728
1729 if (is_udp) {
1730 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1731 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1732 qp->qp1_hdr.udp.csum = 0;
1733 }
1734
1735 /* BTH */
1736 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1737 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1738 qp->qp1_hdr.immediate_present = 1;
1739 } else {
1740 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1741 }
1742 if (wr->send_flags & IB_SEND_SOLICITED)
1743 qp->qp1_hdr.bth.solicited_event = 1;
1744 /* pad_count */
1745 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1746
1747 /* P_key for QP1 is for all members */
1748 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1749 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1750 qp->qp1_hdr.bth.ack_req = 0;
1751 qp->send_psn++;
1752 qp->send_psn &= BTH_PSN_MASK;
1753 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1754 /* DETH */
1755	/* Use the privileged Q_Key for QP1 */
1756 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1757 qp->qp1_hdr.deth.source_qpn = IB_QP1;
1758
1759 /* Pack the QP1 to the transmit buffer */
1760 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1761 if (buf) {
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07001762 ib_ud_header_pack(&qp->qp1_hdr, buf);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001763 for (i = wqe->num_sge; i; i--) {
1764 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1765 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1766 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1767 }
1768
1769 /*
1770		 * Max header buf size for IPV6 RoCE V2 is 86,
1771		 * which is the same as the QP1 SQ header buffer.
1772		 * Header buf size for IPV4 RoCE V2 can be 66:
1773		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
1774		 * Subtract 20 bytes from the QP1 SQ header buf size.
1775 */
1776 if (is_udp && ip_version == 4)
1777 sge.size -= 20;
1778 /*
1779		 * Max header buf size for RoCE V1 is 78:
1780		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1781		 * Subtract 8 bytes from the QP1 SQ header buf size.
1782 */
1783 if (!is_udp)
1784 sge.size -= 8;
1785
1786 /* Subtract 4 bytes for non vlan packets */
1787 if (!is_vlan)
1788 sge.size -= 4;
1789
1790 wqe->sg_list[0].addr = sge.addr;
1791 wqe->sg_list[0].lkey = sge.lkey;
1792 wqe->sg_list[0].size = sge.size;
1793 wqe->num_sge++;
1794
1795 } else {
1796 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1797 rc = -ENOMEM;
1798 }
1799 return rc;
1800}
1801
1802/* The MAD layer provides only a recv SGE large enough for the
1803 * ib_grh + MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
1804 * or RoCE iCRC. The Cu+ solution must provide a buffer for the entire
1805 * receive packet (334 bytes) with no VLAN and then copy the GRH
1806 * and the MAD datagram out to the provided SGE.
1807 */
1808static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1809 struct ib_recv_wr *wr,
1810 struct bnxt_qplib_swqe *wqe,
1811 int payload_size)
1812{
1813 struct bnxt_qplib_sge ref, sge;
1814 u32 rq_prod_index;
1815 struct bnxt_re_sqp_entries *sqp_entry;
1816
1817 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1818
1819 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1820 return -ENOMEM;
1821
1822 /* Create 1 SGE to receive the entire
1823 * ethernet packet
1824 */
1825 /* Save the reference from ULP */
1826 ref.addr = wqe->sg_list[0].addr;
1827 ref.lkey = wqe->sg_list[0].lkey;
1828 ref.size = wqe->sg_list[0].size;
1829
1830 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1831
1832 /* SGE 1 */
1833 wqe->sg_list[0].addr = sge.addr;
1834 wqe->sg_list[0].lkey = sge.lkey;
1835 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1836 sge.size -= wqe->sg_list[0].size;
1837
1838 sqp_entry->sge.addr = ref.addr;
1839 sqp_entry->sge.lkey = ref.lkey;
1840 sqp_entry->sge.size = ref.size;
1841 /* Store the wrid for reporting completion */
1842 sqp_entry->wrid = wqe->wr_id;
1843 /* change the wqe->wrid to table index */
1844 wqe->wr_id = rq_prod_index;
1845 return 0;
1846}
1847
1848static int is_ud_qp(struct bnxt_re_qp *qp)
1849{
1850 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1851}
1852
1853static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1854 struct ib_send_wr *wr,
1855 struct bnxt_qplib_swqe *wqe)
1856{
1857 struct bnxt_re_ah *ah = NULL;
1858
1859 if (is_ud_qp(qp)) {
1860 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1861 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1862 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1863 wqe->send.avid = ah->qplib_ah.id;
1864 }
1865 switch (wr->opcode) {
1866 case IB_WR_SEND:
1867 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1868 break;
1869 case IB_WR_SEND_WITH_IMM:
1870 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1871 wqe->send.imm_data = wr->ex.imm_data;
1872 break;
1873 case IB_WR_SEND_WITH_INV:
1874 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1875 wqe->send.inv_key = wr->ex.invalidate_rkey;
1876 break;
1877 default:
1878 return -EINVAL;
1879 }
1880 if (wr->send_flags & IB_SEND_SIGNALED)
1881 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1882 if (wr->send_flags & IB_SEND_FENCE)
1883 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1884 if (wr->send_flags & IB_SEND_SOLICITED)
1885 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1886 if (wr->send_flags & IB_SEND_INLINE)
1887 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1888
1889 return 0;
1890}
1891
1892static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1893 struct bnxt_qplib_swqe *wqe)
1894{
1895 switch (wr->opcode) {
1896 case IB_WR_RDMA_WRITE:
1897 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1898 break;
1899 case IB_WR_RDMA_WRITE_WITH_IMM:
1900 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1901 wqe->rdma.imm_data = wr->ex.imm_data;
1902 break;
1903 case IB_WR_RDMA_READ:
1904 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1905 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1906 break;
1907 default:
1908 return -EINVAL;
1909 }
1910 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1911 wqe->rdma.r_key = rdma_wr(wr)->rkey;
1912 if (wr->send_flags & IB_SEND_SIGNALED)
1913 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1914 if (wr->send_flags & IB_SEND_FENCE)
1915 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1916 if (wr->send_flags & IB_SEND_SOLICITED)
1917 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1918 if (wr->send_flags & IB_SEND_INLINE)
1919 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1920
1921 return 0;
1922}
1923
1924static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1925 struct bnxt_qplib_swqe *wqe)
1926{
1927 switch (wr->opcode) {
1928 case IB_WR_ATOMIC_CMP_AND_SWP:
1929 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
Devesh Sharma55311d02017-08-31 09:27:30 +05301930 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08001931 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1932 break;
1933 case IB_WR_ATOMIC_FETCH_AND_ADD:
1934 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1935 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1936 break;
1937 default:
1938 return -EINVAL;
1939 }
1940 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1941 wqe->atomic.r_key = atomic_wr(wr)->rkey;
1942 if (wr->send_flags & IB_SEND_SIGNALED)
1943 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1944 if (wr->send_flags & IB_SEND_FENCE)
1945 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1946 if (wr->send_flags & IB_SEND_SOLICITED)
1947 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1948 return 0;
1949}
1950
1951static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1952 struct bnxt_qplib_swqe *wqe)
1953{
1954 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1955 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1956
1957 if (wr->send_flags & IB_SEND_SIGNALED)
1958 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1959 if (wr->send_flags & IB_SEND_FENCE)
1960 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1961 if (wr->send_flags & IB_SEND_SOLICITED)
1962 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1963
1964 return 0;
1965}
1966
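/* Build a fast-register (REG_MR) WQE from an ib_reg_wr */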
1967static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1968 struct bnxt_qplib_swqe *wqe)
1969{
1970 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1971 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1972 int access = wr->access;
1973
1974 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1975 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1976 wqe->frmr.page_list = mr->pages;
1977 wqe->frmr.page_list_len = mr->npages;
1978 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1979 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1980
1981 if (wr->wr.send_flags & IB_SEND_FENCE)
1982 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1983 if (wr->wr.send_flags & IB_SEND_SIGNALED)
1984 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1985
1986 if (access & IB_ACCESS_LOCAL_WRITE)
1987 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1988 if (access & IB_ACCESS_REMOTE_READ)
1989 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
1990 if (access & IB_ACCESS_REMOTE_WRITE)
1991 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
1992 if (access & IB_ACCESS_REMOTE_ATOMIC)
1993 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
1994 if (access & IB_ACCESS_MW_BIND)
1995 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
1996
1997 wqe->frmr.l_key = wr->key;
1998 wqe->frmr.length = wr->mr->length;
1999 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2000 wqe->frmr.va = wr->mr->iova;
2001 return 0;
2002}
2003
2004static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2005 struct ib_send_wr *wr,
2006 struct bnxt_qplib_swqe *wqe)
2007{
2008 /* Copy the inline data to the data field */
2009 u8 *in_data;
2010 u32 i, sge_len;
2011 void *sge_addr;
2012
2013 in_data = wqe->inline_data;
2014 for (i = 0; i < wr->num_sge; i++) {
2015 sge_addr = (void *)(unsigned long)
2016 wr->sg_list[i].addr;
2017 sge_len = wr->sg_list[i].length;
2018
2019 if ((sge_len + wqe->inline_len) >
2020 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2021 dev_err(rdev_to_dev(rdev),
2022 "Inline data size requested > supported value");
2023 return -EINVAL;
2024 }
2026
2027 memcpy(in_data, sge_addr, sge_len);
2028 in_data += wr->sg_list[i].length;
2029 wqe->inline_len += wr->sg_list[i].length;
2030 }
2031 return wqe->inline_len;
2032}
2033
2034static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2035 struct ib_send_wr *wr,
2036 struct bnxt_qplib_swqe *wqe)
2037{
2038 int payload_sz = 0;
2039
2040 if (wr->send_flags & IB_SEND_INLINE)
2041 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2042 else
2043 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2044 wqe->num_sge);
2045
2046 return payload_sz;
2047}
2048
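/*
 * Workaround for a HW stall on UD/GSI/raw-Ethertype QPs: once
 * wqe_cnt reaches BNXT_RE_UD_QP_HW_STALL, issue a dummy modify-QP
 * to RTS and reset the counter.
 */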
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002049static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2050{
2051 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2052 qp->ib_qp.qp_type == IB_QPT_GSI ||
2053 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2054 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2055 int qp_attr_mask;
2056 struct ib_qp_attr qp_attr;
2057
2058 qp_attr_mask = IB_QP_STATE;
2059 qp_attr.qp_state = IB_QPS_RTS;
2060 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2061 qp->qplib_qp.wqe_cnt = 0;
2062 }
2063}
2064
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002065static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2066 struct bnxt_re_qp *qp,
2067 struct ib_send_wr *wr)
2068{
2069 struct bnxt_qplib_swqe wqe;
2070 int rc = 0, payload_sz = 0;
2071 unsigned long flags;
2072
2073 spin_lock_irqsave(&qp->sq_lock, flags);
2074 memset(&wqe, 0, sizeof(wqe));
2075 while (wr) {
2076 /* House keeping */
2077 memset(&wqe, 0, sizeof(wqe));
2078
2079 /* Common */
2080 wqe.num_sge = wr->num_sge;
2081 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2082 dev_err(rdev_to_dev(rdev),
2083 "Limit exceeded for Send SGEs");
2084 rc = -EINVAL;
2085 goto bad;
2086 }
2087
2088 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2089 if (payload_sz < 0) {
2090 rc = -EINVAL;
2091 goto bad;
2092 }
2093 wqe.wr_id = wr->wr_id;
2094
2095 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2096
2097 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2098 if (!rc)
2099 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2100bad:
2101 if (rc) {
2102 dev_err(rdev_to_dev(rdev),
2103 "Post send failed opcode = %#x rc = %d",
2104 wr->opcode, rc);
2105 break;
2106 }
2107 wr = wr->next;
2108 }
2109 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002110 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002111 spin_unlock_irqrestore(&qp->sq_lock, flags);
2112 return rc;
2113}
2114
2115int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2116 struct ib_send_wr **bad_wr)
2117{
2118 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2119 struct bnxt_qplib_swqe wqe;
2120 int rc = 0, payload_sz = 0;
2121 unsigned long flags;
2122
2123 spin_lock_irqsave(&qp->sq_lock, flags);
2124 while (wr) {
2125 /* House keeping */
2126 memset(&wqe, 0, sizeof(wqe));
2127
2128 /* Common */
2129 wqe.num_sge = wr->num_sge;
2130 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2131 dev_err(rdev_to_dev(qp->rdev),
2132 "Limit exceeded for Send SGEs");
2133 rc = -EINVAL;
2134 goto bad;
2135 }
2136
2137 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2138 if (payload_sz < 0) {
2139 rc = -EINVAL;
2140 goto bad;
2141 }
2142 wqe.wr_id = wr->wr_id;
2143
2144 switch (wr->opcode) {
2145 case IB_WR_SEND:
2146 case IB_WR_SEND_WITH_IMM:
2147 if (ib_qp->qp_type == IB_QPT_GSI) {
2148 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2149 payload_sz);
2150 if (rc)
2151 goto bad;
2152 wqe.rawqp1.lflags |=
2153 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2154 }
2155			/* send_flags is a bitmask; test the flag rather
2156			 * than switching on the whole field
2157			 */
2158			if (wr->send_flags & IB_SEND_IP_CSUM)
2159				wqe.rawqp1.lflags |=
2160					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2161			/* Fall through to build the wqe */
2164 case IB_WR_SEND_WITH_INV:
2165 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2166 break;
2167 case IB_WR_RDMA_WRITE:
2168 case IB_WR_RDMA_WRITE_WITH_IMM:
2169 case IB_WR_RDMA_READ:
2170 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2171 break;
2172 case IB_WR_ATOMIC_CMP_AND_SWP:
2173 case IB_WR_ATOMIC_FETCH_AND_ADD:
2174 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2175 break;
2176 case IB_WR_RDMA_READ_WITH_INV:
2177 dev_err(rdev_to_dev(qp->rdev),
2178 "RDMA Read with Invalidate is not supported");
2179 rc = -EINVAL;
2180 goto bad;
2181 case IB_WR_LOCAL_INV:
2182 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2183 break;
2184 case IB_WR_REG_MR:
2185 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2186 break;
2187 default:
2188 /* Unsupported WRs */
2189 dev_err(rdev_to_dev(qp->rdev),
2190 "WR (%#x) is not supported", wr->opcode);
2191 rc = -EINVAL;
2192 goto bad;
2193 }
2194 if (!rc)
2195 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2196bad:
2197 if (rc) {
2198 dev_err(rdev_to_dev(qp->rdev),
2199 "post_send failed op:%#x qps = %#x rc = %d\n",
2200 wr->opcode, qp->qplib_qp.state, rc);
2201 *bad_wr = wr;
2202 break;
2203 }
2204 wr = wr->next;
2205 }
2206 bnxt_qplib_post_send_db(&qp->qplib_qp);
Somnath Kotur3fb755b2017-05-22 03:15:36 -07002207 bnxt_ud_qp_hw_stall_workaround(qp);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002208 spin_unlock_irqrestore(&qp->sq_lock, flags);
2209
2210 return rc;
2211}
2212
2213static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2214 struct bnxt_re_qp *qp,
2215 struct ib_recv_wr *wr)
2216{
2217 struct bnxt_qplib_swqe wqe;
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07002218 int rc = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002219
2220 memset(&wqe, 0, sizeof(wqe));
2221 while (wr) {
2222 /* House keeping */
2223 memset(&wqe, 0, sizeof(wqe));
2224
2225 /* Common */
2226 wqe.num_sge = wr->num_sge;
2227 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2228 dev_err(rdev_to_dev(rdev),
2229 "Limit exceeded for Receive SGEs");
2230 rc = -EINVAL;
2231 break;
2232 }
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07002233 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002234 wqe.wr_id = wr->wr_id;
2235 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2236
2237 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2238 if (rc)
2239 break;
2240
2241 wr = wr->next;
2242 }
2243 if (!rc)
2244 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2245 return rc;
2246}
2247
2248int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2249 struct ib_recv_wr **bad_wr)
2250{
2251 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2252 struct bnxt_qplib_swqe wqe;
2253 int rc = 0, payload_sz = 0;
Devesh Sharma018cf592017-05-22 03:15:40 -07002254 unsigned long flags;
2255 u32 count = 0;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002256
Devesh Sharma018cf592017-05-22 03:15:40 -07002257 spin_lock_irqsave(&qp->rq_lock, flags);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002258 while (wr) {
2259 /* House keeping */
2260 memset(&wqe, 0, sizeof(wqe));
2261
2262 /* Common */
2263 wqe.num_sge = wr->num_sge;
2264 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2265 dev_err(rdev_to_dev(qp->rdev),
2266 "Limit exceeded for Receive SGEs");
2267 rc = -EINVAL;
2268 *bad_wr = wr;
2269 break;
2270 }
2271
2272 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2273 wr->num_sge);
2274 wqe.wr_id = wr->wr_id;
2275 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2276
2277 if (ib_qp->qp_type == IB_QPT_GSI)
2278 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2279 payload_sz);
2280 if (!rc)
2281 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2282 if (rc) {
2283 *bad_wr = wr;
2284 break;
2285 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002286
2287		/* Ring the DB if the RQEs posted reach the threshold value */
2288 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2289 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2290 count = 0;
2291 }
2292
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002293 wr = wr->next;
2294 }
Devesh Sharma018cf592017-05-22 03:15:40 -07002295
2296 if (count)
2297 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2298
2299 spin_unlock_irqrestore(&qp->rq_lock, flags);
2300
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002301 return rc;
2302}
2303
2304/* Completion Queues */
2305int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2306{
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002307 int rc;
Selvin Xavierccd9d0d2018-01-11 11:52:07 -05002308 struct bnxt_re_cq *cq;
2309 struct bnxt_qplib_nq *nq;
2310 struct bnxt_re_dev *rdev;
2311
2312 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2313 rdev = cq->rdev;
2314 nq = cq->qplib_cq.nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002315
2316 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2317 if (rc) {
2318 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2319 return rc;
2320 }
Doug Ledford374cb862017-04-25 14:00:59 -04002321 if (!IS_ERR_OR_NULL(cq->umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002322 ib_umem_release(cq->umem);
2323
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002324 atomic_dec(&rdev->cq_count);
Selvin Xavier6a5df912017-08-02 01:46:18 -07002325 nq->budget--;
Selvin Xavierccd9d0d2018-01-11 11:52:07 -05002326 kfree(cq->cql);
2327 kfree(cq);
2328
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002329 return 0;
2330}
2331
2332struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2333 const struct ib_cq_init_attr *attr,
2334 struct ib_ucontext *context,
2335 struct ib_udata *udata)
2336{
2337 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2338 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2339 struct bnxt_re_cq *cq = NULL;
2340 int rc, entries;
2341 int cqe = attr->cqe;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002342 struct bnxt_qplib_nq *nq = NULL;
2343 unsigned int nq_alloc_cnt;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002344
2345 /* Validate CQ fields */
2346 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2347 dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
2348 return ERR_PTR(-EINVAL);
2349 }
2350 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2351 if (!cq)
2352 return ERR_PTR(-ENOMEM);
2353
2354 cq->rdev = rdev;
2355 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2356
2357 entries = roundup_pow_of_two(cqe + 1);
2358 if (entries > dev_attr->max_cq_wqes + 1)
2359 entries = dev_attr->max_cq_wqes + 1;
2360
2361 if (context) {
2362 struct bnxt_re_cq_req req;
2363 struct bnxt_re_ucontext *uctx = container_of
2364 (context,
2365 struct bnxt_re_ucontext,
2366 ib_uctx);
2367 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2368 rc = -EFAULT;
2369 goto fail;
2370 }
2371
2372 cq->umem = ib_umem_get(context, req.cq_va,
2373 entries * sizeof(struct cq_base),
2374 IB_ACCESS_LOCAL_WRITE, 1);
2375 if (IS_ERR(cq->umem)) {
2376 rc = PTR_ERR(cq->umem);
2377 goto fail;
2378 }
2379 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2380 cq->qplib_cq.nmap = cq->umem->nmap;
Devesh Sharmab3b2c7c2017-06-29 12:28:08 -07002381 cq->qplib_cq.dpi = &uctx->dpi;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002382 } else {
2383 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2384 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2385 GFP_KERNEL);
2386 if (!cq->cql) {
2387 rc = -ENOMEM;
2388 goto fail;
2389 }
2390
2391 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2392 cq->qplib_cq.sghead = NULL;
2393 cq->qplib_cq.nmap = 0;
2394 }
Selvin Xavier6a5df912017-08-02 01:46:18 -07002395 /*
2396	 * Allocate the NQ in a round-robin fashion; nq_alloc_cnt is
2397	 * used for getting the NQ index.
2398 */
2399 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2400 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002401 cq->qplib_cq.max_wqe = entries;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002402 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2403 cq->qplib_cq.nq = nq;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002404
2405 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2406 if (rc) {
2407 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2408 goto fail;
2409 }
2410
2411 cq->ib_cq.cqe = entries;
2412 cq->cq_period = cq->qplib_cq.period;
Selvin Xavier6a5df912017-08-02 01:46:18 -07002413 nq->budget++;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002414
2415 atomic_inc(&rdev->cq_count);
2416
2417 if (context) {
2418 struct bnxt_re_cq_resp resp;
2419
2420 resp.cqid = cq->qplib_cq.id;
2421 resp.tail = cq->qplib_cq.hwq.cons;
2422 resp.phase = cq->qplib_cq.period;
2423 resp.rsvd = 0;
2424 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2425 if (rc) {
2426 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2427 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2428 goto c2fail;
2429 }
2430 }
2431
2432 return &cq->ib_cq;
2433
2434c2fail:
2435 if (context)
2436 ib_umem_release(cq->umem);
2437fail:
2438 kfree(cq->cql);
2439 kfree(cq);
2440 return ERR_PTR(rc);
2441}
2442
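/* Translate HW CQE status codes into IB work-completion status */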
2443static u8 __req_to_ib_wc_status(u8 qstatus)
2444{
2445 switch (qstatus) {
2446 case CQ_REQ_STATUS_OK:
2447 return IB_WC_SUCCESS;
2448 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2449 return IB_WC_BAD_RESP_ERR;
2450 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2451 return IB_WC_LOC_LEN_ERR;
2452 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2453 return IB_WC_LOC_QP_OP_ERR;
2454 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2455 return IB_WC_LOC_PROT_ERR;
2456 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2457 return IB_WC_GENERAL_ERR;
2458 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2459 return IB_WC_REM_INV_REQ_ERR;
2460 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2461 return IB_WC_REM_ACCESS_ERR;
2462 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2463 return IB_WC_REM_OP_ERR;
2464 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2465 return IB_WC_RNR_RETRY_EXC_ERR;
2466 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2467 return IB_WC_RETRY_EXC_ERR;
2468 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2469 return IB_WC_WR_FLUSH_ERR;
2470 default:
2471 return IB_WC_GENERAL_ERR;
2472 }
2473 return 0;
2474}
2475
2476static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2477{
2478 switch (qstatus) {
2479 case CQ_RES_RAWETH_QP1_STATUS_OK:
2480 return IB_WC_SUCCESS;
2481 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2482 return IB_WC_LOC_ACCESS_ERR;
2483 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2484 return IB_WC_LOC_LEN_ERR;
2485 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2486 return IB_WC_LOC_PROT_ERR;
2487 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2488 return IB_WC_LOC_QP_OP_ERR;
2489 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2490 return IB_WC_GENERAL_ERR;
2491 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2492 return IB_WC_WR_FLUSH_ERR;
2493 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2494 return IB_WC_WR_FLUSH_ERR;
2495 default:
2496 return IB_WC_GENERAL_ERR;
2497 }
2498}
2499
2500static u8 __rc_to_ib_wc_status(u8 qstatus)
2501{
2502 switch (qstatus) {
2503 case CQ_RES_RC_STATUS_OK:
2504 return IB_WC_SUCCESS;
2505 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2506 return IB_WC_LOC_ACCESS_ERR;
2507 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2508 return IB_WC_LOC_LEN_ERR;
2509 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2510 return IB_WC_LOC_PROT_ERR;
2511 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2512 return IB_WC_LOC_QP_OP_ERR;
2513 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2514 return IB_WC_GENERAL_ERR;
2515 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2516 return IB_WC_REM_INV_REQ_ERR;
2517 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2518 return IB_WC_WR_FLUSH_ERR;
2519 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2520 return IB_WC_WR_FLUSH_ERR;
2521 default:
2522 return IB_WC_GENERAL_ERR;
2523 }
2524}
2525
2526static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2527{
2528 switch (cqe->type) {
2529 case BNXT_QPLIB_SWQE_TYPE_SEND:
2530 wc->opcode = IB_WC_SEND;
2531 break;
2532 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2533 wc->opcode = IB_WC_SEND;
2534 wc->wc_flags |= IB_WC_WITH_IMM;
2535 break;
2536 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2537 wc->opcode = IB_WC_SEND;
2538 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2539 break;
2540 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2541 wc->opcode = IB_WC_RDMA_WRITE;
2542 break;
2543 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2544 wc->opcode = IB_WC_RDMA_WRITE;
2545 wc->wc_flags |= IB_WC_WITH_IMM;
2546 break;
2547 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2548 wc->opcode = IB_WC_RDMA_READ;
2549 break;
2550 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2551 wc->opcode = IB_WC_COMP_SWAP;
2552 break;
2553 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2554 wc->opcode = IB_WC_FETCH_ADD;
2555 break;
2556 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2557 wc->opcode = IB_WC_LOCAL_INV;
2558 break;
2559 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2560 wc->opcode = IB_WC_REG_MR;
2561 break;
2562 default:
2563 wc->opcode = IB_WC_SEND;
2564 break;
2565 }
2566
2567 wc->status = __req_to_ib_wc_status(cqe->status);
2568}
2569
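/* Classify a raw QP1 completion as RoCE v1, v2/IPv4 or v2/IPv6 */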
2570static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2571 u16 raweth_qp1_flags2)
2572{
Bart Van Assche6dfa8ae2017-10-11 10:48:49 -07002573 bool is_ipv6 = false, is_ipv4 = false;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002574
2575 /* raweth_qp1_flags Bit 9-6 indicates itype */
2576 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2577 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2578 return -1;
2579
2580 if (raweth_qp1_flags2 &
2581 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2582 raweth_qp1_flags2 &
2583 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002584		/* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
2585 (raweth_qp1_flags2 &
2586 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2587 (is_ipv6 = true) : (is_ipv4 = true);
2588 return ((is_ipv6) ?
2589 BNXT_RE_ROCEV2_IPV6_PACKET :
2590 BNXT_RE_ROCEV2_IPV4_PACKET);
2591 } else {
2592 return BNXT_RE_ROCE_V1_PACKET;
2593 }
2594}
2595
2596static int bnxt_re_to_ib_nw_type(int nw_type)
2597{
2598 u8 nw_hdr_type = 0xFF;
2599
2600 switch (nw_type) {
2601 case BNXT_RE_ROCE_V1_PACKET:
2602 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2603 break;
2604 case BNXT_RE_ROCEV2_IPV4_PACKET:
2605 nw_hdr_type = RDMA_NETWORK_IPV4;
2606 break;
2607 case BNXT_RE_ROCEV2_IPV6_PACKET:
2608 nw_hdr_type = RDMA_NETWORK_IPV6;
2609 break;
2610 }
2611 return nw_hdr_type;
2612}
2613
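/*
 * A frame whose destination MAC differs from the port's own is
 * checked for an IBoE Ethertype or the RoCE v2 UDP destination
 * port to decide whether it is a looped-back packet.
 */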
2614static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2615 void *rq_hdr_buf)
2616{
2617 u8 *tmp_buf = NULL;
2618 struct ethhdr *eth_hdr;
2619 u16 eth_type;
2620 bool rc = false;
2621
2622 tmp_buf = (u8 *)rq_hdr_buf;
2623 /*
2624	 * If the dest mac is not the same as the I/F mac, this could
2625	 * be a loopback or multicast address; check whether it is a
2626	 * loopback packet.
2627 */
2628 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2629 tmp_buf += 4;
2630 /* Check the ether type */
2631 eth_hdr = (struct ethhdr *)tmp_buf;
2632 eth_type = ntohs(eth_hdr->h_proto);
2633 switch (eth_type) {
2634 case ETH_P_IBOE:
2635 rc = true;
2636 break;
2637 case ETH_P_IP:
2638 case ETH_P_IPV6: {
2639 u32 len;
2640 struct udphdr *udp_hdr;
2641
2642 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2643 sizeof(struct ipv6hdr));
2644 tmp_buf += sizeof(struct ethhdr) + len;
2645 udp_hdr = (struct udphdr *)tmp_buf;
2646 if (ntohs(udp_hdr->dest) ==
2647 ROCE_V2_UDP_DPORT)
2648 rc = true;
2649 break;
2650 }
2651 default:
2652 break;
2653 }
2654 }
2655
2656 return rc;
2657}
2658
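/*
 * Relay a packet received on the raw QP1: stash the CQE, post the
 * payload (minus the L2 and any RoCEv2 IP/UDP headers) to the shadow
 * QP's send queue, and repost receive buffers so the MAD layer sees
 * a plain GRH + MAD datagram.
 */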
2659static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2660 struct bnxt_qplib_cqe *cqe)
2661{
2662 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2663 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2664 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2665 struct ib_send_wr *swr;
2666 struct ib_ud_wr udwr;
2667 struct ib_recv_wr rwr;
2668 int pkt_type = 0;
2669 u32 tbl_idx;
2670 void *rq_hdr_buf;
2671 dma_addr_t rq_hdr_buf_map;
2672 dma_addr_t shrq_hdr_buf_map;
2673 u32 offset = 0;
2674 u32 skip_bytes = 0;
2675 struct ib_sge s_sge[2];
2676 struct ib_sge r_sge[2];
2677 int rc;
2678
2679 memset(&udwr, 0, sizeof(udwr));
2680 memset(&rwr, 0, sizeof(rwr));
2681 memset(&s_sge, 0, sizeof(s_sge));
2682 memset(&r_sge, 0, sizeof(r_sge));
2683
2684 swr = &udwr.wr;
2685 tbl_idx = cqe->wr_id;
2686
2687 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2688 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2689 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2690 tbl_idx);
2691
2692 /* Shadow QP header buffer */
2693 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2694 tbl_idx);
2695 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2696
2697 /* Store this cqe */
2698 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2699 sqp_entry->qp1_qp = qp1_qp;
2700
2701 /* Find packet type from the cqe */
2702
2703 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2704 cqe->raweth_qp1_flags2);
2705 if (pkt_type < 0) {
2706 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2707 return -EINVAL;
2708 }
2709
2710 /* Adjust the offset for the user buffer and post in the rq */
2711
2712 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2713 offset = 20;
2714
2715 /*
2716 * QP1 loopback packet has 4 bytes of internal header before
2717 * ether header. Skip these four bytes.
2718 */
2719 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2720 skip_bytes = 4;
2721
2722	/* First send SGE. Skip the ether header */
2723 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2724 + skip_bytes;
2725 s_sge[0].lkey = 0xFFFFFFFF;
2726 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2727 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2728
2729 /* Second Send SGE */
2730 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2731 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2732 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2733 s_sge[1].addr += 8;
2734 s_sge[1].lkey = 0xFFFFFFFF;
2735 s_sge[1].length = 256;
2736
2737 /* First recv SGE */
2738
2739 r_sge[0].addr = shrq_hdr_buf_map;
2740 r_sge[0].lkey = 0xFFFFFFFF;
2741 r_sge[0].length = 40;
2742
2743 r_sge[1].addr = sqp_entry->sge.addr + offset;
2744 r_sge[1].lkey = sqp_entry->sge.lkey;
2745 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2746
2747 /* Create receive work request */
2748 rwr.num_sge = 2;
2749 rwr.sg_list = r_sge;
2750 rwr.wr_id = tbl_idx;
2751 rwr.next = NULL;
2752
2753 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2754 if (rc) {
2755 dev_err(rdev_to_dev(rdev),
2756 "Failed to post Rx buffers to shadow QP");
2757 return -ENOMEM;
2758 }
2759
2760 swr->num_sge = 2;
2761 swr->sg_list = s_sge;
2762 swr->wr_id = tbl_idx;
2763 swr->opcode = IB_WR_SEND;
2764 swr->next = NULL;
2765
2766 udwr.ah = &rdev->sqp_ah->ib_ah;
2767 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2768 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2769
2770 /* post data received in the send queue */
2771 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2772
2773 return 0;
2774}
2775
2776static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2777 struct bnxt_qplib_cqe *cqe)
2778{
2779 wc->opcode = IB_WC_RECV;
2780 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2781 wc->wc_flags |= IB_WC_GRH;
2782}
2783
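/*
 * Extract the VLAN id and priority from the raw QP1 CQE metadata
 * when the metadata format carries an 802.1Q tag.
 */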
Devesh Sharma84511452017-11-08 02:48:45 -05002784static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
2785 u16 *vid, u8 *sl)
2786{
2787 bool ret = false;
2788 u32 metadata;
2789 u16 tpid;
2790
2791 metadata = orig_cqe->raweth_qp1_metadata;
2792 if (orig_cqe->raweth_qp1_flags2 &
2793 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
2794 tpid = ((metadata &
2795 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
2796 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
2797 if (tpid == ETH_P_8021Q) {
2798 *vid = metadata &
2799 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
2800 *sl = (metadata &
2801 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
2802 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
2803 ret = true;
2804 }
2805 }
2806
2807 return ret;
2808}
2809
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002810static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2811 struct bnxt_qplib_cqe *cqe)
2812{
2813 wc->opcode = IB_WC_RECV;
2814 wc->status = __rc_to_ib_wc_status(cqe->status);
2815
2816 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2817 wc->wc_flags |= IB_WC_WITH_IMM;
2818 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2819 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2820 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2821 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2822 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2823}
2824
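/*
 * Completion on the shadow GSI QP: report it against the original
 * QP1 work request using the CQE stashed in sqp_tbl.
 */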
2825static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2826 struct ib_wc *wc,
2827 struct bnxt_qplib_cqe *cqe)
2828{
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002829 struct bnxt_re_dev *rdev = qp->rdev;
2830 struct bnxt_re_qp *qp1_qp = NULL;
2831 struct bnxt_qplib_cqe *orig_cqe = NULL;
2832 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2833 int nw_type;
Devesh Sharma84511452017-11-08 02:48:45 -05002834 u32 tbl_idx;
2835 u16 vlan_id;
2836 u8 sl;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002837
2838 tbl_idx = cqe->wr_id;
2839
2840 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2841 qp1_qp = sqp_entry->qp1_qp;
2842 orig_cqe = &sqp_entry->cqe;
2843
2844 wc->wr_id = sqp_entry->wrid;
2845 wc->byte_len = orig_cqe->length;
2846 wc->qp = &qp1_qp->ib_qp;
2847
2848 wc->ex.imm_data = orig_cqe->immdata;
2849 wc->src_qp = orig_cqe->src_qp;
2850 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
Devesh Sharma84511452017-11-08 02:48:45 -05002851 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
2852 wc->vlan_id = vlan_id;
2853 wc->sl = sl;
2854 wc->wc_flags |= IB_WC_WITH_VLAN;
2855 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002856 wc->port_num = 1;
2857 wc->vendor_err = orig_cqe->status;
2858
2859 wc->opcode = IB_WC_RECV;
2860 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2861 wc->wc_flags |= IB_WC_GRH;
2862
2863 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2864 orig_cqe->raweth_qp1_flags2);
2865 if (nw_type >= 0) {
2866 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2867 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2868 }
2869}
2870
2871static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2872 struct bnxt_qplib_cqe *cqe)
2873{
2874 wc->opcode = IB_WC_RECV;
2875 wc->status = __rc_to_ib_wc_status(cqe->status);
2876
2877 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2878 wc->wc_flags |= IB_WC_WITH_IMM;
2879 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2880 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2881 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2882 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2883 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2884}
2885
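/*
 * Post a fence memory-window bind as a "phantom" WQE on the SQ,
 * consuming the slot reserved via q_full_delta.
 */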
Eddie Wai9152e0b2017-06-14 03:26:23 -07002886static int send_phantom_wqe(struct bnxt_re_qp *qp)
2887{
2888 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
2889 unsigned long flags;
2890 int rc = 0;
2891
2892 spin_lock_irqsave(&qp->sq_lock, flags);
2893
2894 rc = bnxt_re_bind_fence_mw(lib_qp);
2895 if (!rc) {
2896 lib_qp->sq.phantom_wqe_cnt++;
2897 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
2898 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
2899 lib_qp->id, lib_qp->sq.hwq.prod,
2900 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
2901 lib_qp->sq.phantom_wqe_cnt);
2902 }
2903
2904 spin_unlock_irqrestore(&qp->sq_lock, flags);
2905 return rc;
2906}
2907
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002908int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2909{
2910 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2911 struct bnxt_re_qp *qp;
2912 struct bnxt_qplib_cqe *cqe;
2913 int i, ncqe, budget;
Eddie Wai9152e0b2017-06-14 03:26:23 -07002914 struct bnxt_qplib_q *sq;
2915 struct bnxt_qplib_qp *lib_qp;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002916 u32 tbl_idx;
2917 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2918 unsigned long flags;
2919
2920 spin_lock_irqsave(&cq->cq_lock, flags);
2921 budget = min_t(u32, num_entries, cq->max_cql);
Devesh Sharma10d1ded2017-06-29 12:28:17 -07002922 num_entries = budget;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002923 if (!cq->cql) {
2924 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2925 goto exit;
2926 }
2927 cqe = &cq->cql[0];
2928 while (budget) {
Eddie Wai9152e0b2017-06-14 03:26:23 -07002929 lib_qp = NULL;
2930 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
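		/* Post a phantom WQE first if the SQ is waiting on one */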
2931 if (lib_qp) {
2932 sq = &lib_qp->sq;
2933 if (sq->send_phantom) {
2934 qp = container_of(lib_qp,
2935 struct bnxt_re_qp, qplib_qp);
2936 if (send_phantom_wqe(qp) == -ENOMEM)
2937 dev_err(rdev_to_dev(cq->rdev),
2938 "Phantom failed! Scheduled to send again\n");
2939 else
2940 sq->send_phantom = false;
2941 }
2942 }
Selvin Xavierf218d672017-06-29 12:28:15 -07002943 if (ncqe < budget)
2944 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
2945 cqe + ncqe,
2946 budget - ncqe);
Eddie Wai9152e0b2017-06-14 03:26:23 -07002947
Selvin Xavier1ac5a402017-02-10 03:19:33 -08002948 if (!ncqe)
2949 break;
2950
2951 for (i = 0; i < ncqe; i++, cqe++) {
2952 /* Transcribe each qplib_wqe back to ib_wc */
2953 memset(wc, 0, sizeof(*wc));
2954
2955 wc->wr_id = cqe->wr_id;
2956 wc->byte_len = cqe->length;
2957 qp = container_of
2958 ((struct bnxt_qplib_qp *)
2959 (unsigned long)(cqe->qp_handle),
2960 struct bnxt_re_qp, qplib_qp);
2961 if (!qp) {
2962 dev_err(rdev_to_dev(cq->rdev),
2963 "POLL CQ : bad QP handle");
2964 continue;
2965 }
2966 wc->qp = &qp->ib_qp;
2967 wc->ex.imm_data = cqe->immdata;
2968 wc->src_qp = cqe->src_qp;
2969 memcpy(wc->smac, cqe->smac, ETH_ALEN);
2970 wc->port_num = 1;
2971 wc->vendor_err = cqe->status;
2972
2973 switch (cqe->opcode) {
2974 case CQ_BASE_CQE_TYPE_REQ:
2975 if (qp->qplib_qp.id ==
2976 qp->rdev->qp1_sqp->qplib_qp.id) {
2977 /* Handle this completion with
2978 * the stored completion
2979 */
2980 memset(wc, 0, sizeof(*wc));
2981 continue;
2982 }
2983 bnxt_re_process_req_wc(wc, cqe);
2984 break;
2985 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2986 if (!cqe->status) {
2987 int rc = 0;
2988
2989 rc = bnxt_re_process_raw_qp_pkt_rx
2990 (qp, cqe);
2991 if (!rc) {
2992 memset(wc, 0, sizeof(*wc));
2993 continue;
2994 }
2995 cqe->status = -1;
2996 }
2997			/* Errors need not be looped back, but
2998			 * change the wr_id to the one stored
2999			 * in the table.
3000 */
3001 tbl_idx = cqe->wr_id;
3002 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3003 wc->wr_id = sqp_entry->wrid;
3004 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3005 break;
3006 case CQ_BASE_CQE_TYPE_RES_RC:
3007 bnxt_re_process_res_rc_wc(wc, cqe);
3008 break;
3009 case CQ_BASE_CQE_TYPE_RES_UD:
3010 if (qp->qplib_qp.id ==
3011 qp->rdev->qp1_sqp->qplib_qp.id) {
3012 /* Handle this completion with
3013 * the stored completion
3014 */
3015 if (cqe->status) {
3016 continue;
3017 } else {
3018 bnxt_re_process_res_shadow_qp_wc
3019 (qp, wc, cqe);
3020 break;
3021 }
3022 }
3023 bnxt_re_process_res_ud_wc(wc, cqe);
3024 break;
3025 default:
3026 dev_err(rdev_to_dev(cq->rdev),
3027 "POLL CQ : type 0x%x not handled",
3028 cqe->opcode);
3029 continue;
3030 }
3031 wc++;
3032 budget--;
3033 }
3034 }
3035exit:
3036 spin_unlock_irqrestore(&cq->cq_lock, flags);
3037 return num_entries - budget;
3038}
3039
3040int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3041 enum ib_cq_notify_flags ib_cqn_flags)
3042{
3043 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
Selvin Xavier05127662017-11-06 08:07:32 -08003044 int type = 0, rc = 0;
3045 unsigned long flags;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003046
Selvin Xavier05127662017-11-06 08:07:32 -08003047 spin_lock_irqsave(&cq->cq_lock, flags);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003048 /* Trigger on the very next completion */
3049 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3050 type = DBR_DBR_TYPE_CQ_ARMALL;
3051 /* Trigger on the next solicited completion */
3052 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3053 type = DBR_DBR_TYPE_CQ_ARMSE;
3054
Selvin Xavier499e4562017-06-29 12:28:18 -07003055 /* Poll to see if there are missed events */
3056 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
Selvin Xavier05127662017-11-06 08:07:32 -08003057 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3058 rc = 1;
3059 goto exit;
3060 }
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003061 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3062
Selvin Xavier05127662017-11-06 08:07:32 -08003063exit:
3064 spin_unlock_irqrestore(&cq->cq_lock, flags);
3065 return rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003066}
3067
3068/* Memory Regions */
3069struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3070{
3071 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3072 struct bnxt_re_dev *rdev = pd->rdev;
3073 struct bnxt_re_mr *mr;
3074 u64 pbl = 0;
3075 int rc;
3076
3077 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3078 if (!mr)
3079 return ERR_PTR(-ENOMEM);
3080
3081 mr->rdev = rdev;
3082 mr->qplib_mr.pd = &pd->qplib_pd;
3083 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3084 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3085
3086 /* Allocate and register 0 as the address */
3087 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3088 if (rc)
3089 goto fail;
3090
3091 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3092	mr->qplib_mr.total_size = -1; /* Infinite length */
Somnath Kotur872f3572018-01-11 11:52:09 -05003093 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3094 PAGE_SIZE);
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003095 if (rc)
3096 goto fail_mr;
3097
3098 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3099 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3100 IB_ACCESS_REMOTE_ATOMIC))
3101 mr->ib_mr.rkey = mr->ib_mr.lkey;
3102 atomic_inc(&rdev->mr_count);
3103
3104 return &mr->ib_mr;
3105
3106fail_mr:
3107 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3108fail:
3109 kfree(mr);
3110 return ERR_PTR(rc);
3111}
3112
3113int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3114{
3115 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3116 struct bnxt_re_dev *rdev = mr->rdev;
Colin Ian Kingebbd1df2017-02-17 15:35:22 +00003117 int rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003118
Selvin Xavier1c980b02017-05-22 03:15:34 -07003119 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
Somnath Kotur872f3572018-01-11 11:52:09 -05003120 if (rc)
Selvin Xavier1c980b02017-05-22 03:15:34 -07003121 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
Selvin Xavier1c980b02017-05-22 03:15:34 -07003122
Selvin Xavier19935192017-08-31 09:27:34 +05303123 if (mr->pages) {
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003124 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3125 &mr->qplib_frpl);
3126 kfree(mr->pages);
3127 mr->npages = 0;
3128 mr->pages = NULL;
3129 }
Doug Ledford374cb862017-04-25 14:00:59 -04003130 if (!IS_ERR_OR_NULL(mr->ib_umem))
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003131 ib_umem_release(mr->ib_umem);
3132
3133 kfree(mr);
3134 atomic_dec(&rdev->mr_count);
3135 return rc;
3136}
3137
3138static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3139{
3140 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3141
3142 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3143 return -ENOMEM;
3144
3145 mr->pages[mr->npages++] = addr;
3146 return 0;
3147}
3148
3149int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3150 unsigned int *sg_offset)
3151{
3152 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3153
3154 mr->npages = 0;
3155 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3156}
3157
3158struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3159 u32 max_num_sg)
3160{
3161 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3162 struct bnxt_re_dev *rdev = pd->rdev;
3163 struct bnxt_re_mr *mr = NULL;
3164 int rc;
3165
3166 if (type != IB_MR_TYPE_MEM_REG) {
3167 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3168 return ERR_PTR(-EINVAL);
3169 }
3170 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3171 return ERR_PTR(-EINVAL);
3172
3173 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3174 if (!mr)
3175 return ERR_PTR(-ENOMEM);
3176
3177 mr->rdev = rdev;
3178 mr->qplib_mr.pd = &pd->qplib_pd;
3179 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3180 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3181
3182 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3183 if (rc)
Somnath Kotur872f3572018-01-11 11:52:09 -05003184 goto bail;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003185
3186 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3187 mr->ib_mr.rkey = mr->ib_mr.lkey;
3188
3189 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3190 if (!mr->pages) {
3191 rc = -ENOMEM;
3192 goto fail;
3193 }
3194 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3195 &mr->qplib_frpl, max_num_sg);
3196 if (rc) {
3197 dev_err(rdev_to_dev(rdev),
3198 "Failed to allocate HW FR page list");
3199 goto fail_mr;
3200 }
3201
3202 atomic_inc(&rdev->mr_count);
3203 return &mr->ib_mr;
3204
3205fail_mr:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003206 kfree(mr->pages);
Somnath Kotur872f3572018-01-11 11:52:09 -05003207fail:
3208 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3209bail:
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003210 kfree(mr);
3211 return ERR_PTR(rc);
3212}
3213
Eddie Wai9152e0b2017-06-14 03:26:23 -07003214struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3215 struct ib_udata *udata)
3216{
3217 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3218 struct bnxt_re_dev *rdev = pd->rdev;
3219 struct bnxt_re_mw *mw;
3220 int rc;
3221
3222 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3223 if (!mw)
3224 return ERR_PTR(-ENOMEM);
3225 mw->rdev = rdev;
3226 mw->qplib_mw.pd = &pd->qplib_pd;
3227
3228 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3229 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3230 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3231 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3232 if (rc) {
3233 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3234 goto fail;
3235 }
3236 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3237
3238 atomic_inc(&rdev->mw_count);
3239 return &mw->ib_mw;
3240
3241fail:
3242 kfree(mw);
3243 return ERR_PTR(rc);
3244}
3245
3246int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3247{
3248 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3249 struct bnxt_re_dev *rdev = mw->rdev;
3250 int rc;
3251
3252 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3253 if (rc) {
3254 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3255 return rc;
3256 }
3257
3258 kfree(mw);
3259 atomic_dec(&rdev->mw_count);
3260 return rc;
3261}
3262
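/* Page sizes that can be encoded in a REGISTER_MR request */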
Somnath Kotur872f3572018-01-11 11:52:09 -05003263static int bnxt_re_page_size_ok(int page_shift)
3264{
3265 switch (page_shift) {
3266 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3267 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3268 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3269 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3270 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3271 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3272 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3273 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3274 return 1;
3275 default:
3276 return 0;
3277 }
3278}
3279
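/*
 * Flatten the umem scatterlist into a PBL: the first entry is
 * masked down to the page boundary, and later addresses are
 * recorded only when they start on a page_shift boundary.
 */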
3280static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3281 int page_shift)
3282{
3283 u64 *pbl_tbl = pbl_tbl_orig;
3284 u64 paddr;
3285 u64 page_mask = (1ULL << page_shift) - 1;
3286 int i, pages;
3287 struct scatterlist *sg;
3288 int entry;
3289
3290 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3291 pages = sg_dma_len(sg) >> PAGE_SHIFT;
3292 for (i = 0; i < pages; i++) {
3293 paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
3294 if (pbl_tbl == pbl_tbl_orig)
3295 *pbl_tbl++ = paddr & ~page_mask;
3296 else if ((paddr & page_mask) == 0)
3297 *pbl_tbl++ = paddr;
3298 }
3299 }
3300 return pbl_tbl - pbl_tbl_orig;
3301}
3302
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003303/* uverbs */
3304struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3305 u64 virt_addr, int mr_access_flags,
3306 struct ib_udata *udata)
3307{
3308 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3309 struct bnxt_re_dev *rdev = pd->rdev;
3310 struct bnxt_re_mr *mr;
3311 struct ib_umem *umem;
Somnath Kotur872f3572018-01-11 11:52:09 -05003312 u64 *pbl_tbl = NULL;
3313 int umem_pgs, page_shift, rc;
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003314
Selvin Xavier58d4a672017-06-29 12:28:12 -07003315 if (length > BNXT_RE_MAX_MR_SIZE) {
3316 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
3317 length, BNXT_RE_MAX_MR_SIZE);
3318 return ERR_PTR(-ENOMEM);
3319 }
3320
Selvin Xavier1ac5a402017-02-10 03:19:33 -08003321 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3322 if (!mr)
3323 return ERR_PTR(-ENOMEM);
3324
3325 mr->rdev = rdev;
3326 mr->qplib_mr.pd = &pd->qplib_pd;
3327 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3328 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3329
Somnath Kotur872f3572018-01-11 11:52:09 -05003330 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3331 if (rc) {
3332 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3333 goto free_mr;
3334 }
3335 /* The fixed portion of the rkey is the same as the lkey */
3336 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3337
	umem = ib_umem_get(ib_pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem)) {
		dev_err(rdev_to_dev(rdev), "Failed to get umem");
		rc = -EFAULT;
		goto free_mrw;
	}
	mr->ib_umem = umem;

	mr->qplib_mr.va = virt_addr;
	umem_pgs = ib_umem_page_count(umem);
	if (!umem_pgs) {
		dev_err(rdev_to_dev(rdev), "umem is invalid!");
		rc = -EINVAL;
		goto free_umem;
	}
	mr->qplib_mr.total_size = length;

	pbl_tbl = kcalloc(umem_pgs, sizeof(*pbl_tbl), GFP_KERNEL);
	if (!pbl_tbl) {
		rc = -ENOMEM;
		goto free_umem;
	}

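	/*
	 * Start from the umem's native page shift; it may be promoted to
	 * 2M below when the region is hugetlb backed.
	 */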
	page_shift = umem->page_shift;

	if (!bnxt_re_page_size_ok(page_shift)) {
		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
		rc = -EFAULT;
		goto fail;
	}

	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
		dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
			length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
		rc = -EINVAL;
		goto fail;
	}
	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
		page_shift = BNXT_RE_PAGE_SHIFT_2M;
		dev_warn(rdev_to_dev(rdev), "umem is hugetlb backed, using page size 0x%x",
			 1 << page_shift);
	}

	/* Map umem buf ptrs to the PBL */
	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
			       umem_pgs, false, 1 << page_shift);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to register user MR");
		goto fail;
	}

	kfree(pbl_tbl);

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->qplib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;
fail:
	kfree(pbl_tbl);
free_umem:
	ib_umem_release(umem);
free_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
}

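/*
 * Per-process device context: checks that the library and driver agree
 * on the ABI version, allocates the page shared with userspace, and
 * returns the device limits in struct bnxt_re_uctx_resp.
 */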
struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
					   struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_uctx_resp resp;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	int rc;

	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
		ibdev->uverbs_abi_ver);

	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		dev_dbg(rdev_to_dev(rdev), "ABI version %d is different from the device's %d",
			ibdev->uverbs_abi_ver, BNXT_RE_ABI_VERSION);
		return ERR_PTR(-EPERM);
	}

	uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
	if (!uctx)
		return ERR_PTR(-ENOMEM);

	uctx->rdev = rdev;

	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);

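	/* Return the device limits the user library needs in the ABI response */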
	resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp, use idr_alloc instead */
	resp.max_qp = rdev->qplib_ctx.qpc_count;
	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;
	resp.rsvd = 0;

	rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to copy user context");
		rc = -EFAULT;
		goto cfail;
	}

	return &uctx->ib_uctx;
cfail:
	free_page((unsigned long)uctx->shpg);
	uctx->shpg = NULL;
fail:
	kfree(uctx);
	return ERR_PTR(rc);
}

int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	int rc = 0;

	if (uctx->shpg)
		free_page((unsigned long)uctx->shpg);

	if (uctx->dpi.dbr) {
		/*
		 * Free the DPI; it was allocated when the application
		 * created its first PD.  Clear the context's dpi so it
		 * is not freed twice.
		 */
		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
					    &rdev->qplib_res.dpi_tbl,
					    &uctx->dpi);
		if (rc)
			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
		/* Don't fail; continue with teardown */
		uctx->dpi.dbr = NULL;
	}

	kfree(uctx);
	return 0;
}

/* Map the doorbell page (non-zero offset) or the shared page (offset 0) into the user application */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	u64 pfn;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev), "Failed to map DPI");
			return -EAGAIN;
		}
	} else {
		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start,
				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
			dev_err(rdev_to_dev(rdev),
				"Failed to map shared page");
			return -EAGAIN;
		}
	}

	return 0;
}
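
/*
 * Illustrative only (not part of this driver): the provider library is
 * expected to mmap() one page per mapping, with offset 0 selecting the
 * shared page and a non-zero offset encoding the doorbell PFN, e.g.:
 *
 *	shpg = mmap(NULL, pg_sz, PROT_READ, MAP_SHARED, cmd_fd, 0);
 *	dbr  = mmap(NULL, pg_sz, PROT_WRITE, MAP_SHARED, cmd_fd,
 *		    (off_t)dpi_pfn * pg_sz);
 */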